├── .github
│   └── workflows
│       ├── ci.yml
│       └── test.yml
├── .gitignore
├── .readthedocs.yml
├── LICENSE
├── MANIFEST.in
├── README.md
├── docs
│   ├── MODULES.md
│   ├── README.md
│   ├── _config.yml
│   └── musicalgestures
│       ├── _360video.md
│       ├── _audio.md
│       ├── _blend.md
│       ├── _blurfaces.md
│       ├── _centerface.md
│       ├── _centroid.md
│       ├── _colored.md
│       ├── _cropping_window.md
│       ├── _cropvideo.md
│       ├── _directograms.md
│       ├── _filter.md
│       ├── _flow.md
│       ├── _grid.md
│       ├── _history.md
│       ├── _impacts.md
│       ├── _info.md
│       ├── _input_test.md
│       ├── _mglist.md
│       ├── _motionanalysis.md
│       ├── _motionvideo.md
│       ├── _motionvideo_mp_render.md
│       ├── _motionvideo_mp_run.md
│       ├── _pose.md
│       ├── _show.md
│       ├── _show_window.md
│       ├── _ssm.md
│       ├── _subtract.md
│       ├── _utils.md
│       ├── _video.md
│       ├── _videoadjust.md
│       ├── _videograms.md
│       ├── _videoreader.md
│       ├── _warp.md
│       ├── examples
│       │   ├── index.md
│       │   ├── test_dance.md
│       │   └── test_pianist.md
│       └── index.md
├── mkdocs.yml
├── musicalgestures
│   ├── 3rdparty
│   │   └── windows
│   │       └── wget
│   │           └── wget.exe
│   ├── MusicalGesturesToolbox.ipynb
│   ├── _360video.py
│   ├── __init__.py
│   ├── _audio.py
│   ├── _blend.py
│   ├── _blurfaces.py
│   ├── _centerface.py
│   ├── _colored.py
│   ├── _cropping_window.py
│   ├── _cropvideo.py
│   ├── _directograms.py
│   ├── _filter.py
│   ├── _flow.py
│   ├── _grid.py
│   ├── _history.py
│   ├── _impacts.py
│   ├── _info.py
│   ├── _input_test.py
│   ├── _mglist.py
│   ├── _motionanalysis.py
│   ├── _motionvideo.py
│   ├── _motionvideo_mp_render.py
│   ├── _motionvideo_mp_run.py
│   ├── _pose.py
│   ├── _show.py
│   ├── _show_window.py
│   ├── _ssm.py
│   ├── _subtract.py
│   ├── _utils.py
│   ├── _video.py
│   ├── _videoadjust.py
│   ├── _videograms.py
│   ├── _videoreader.py
│   ├── _warp.py
│   ├── deprecated
│   │   ├── _deprecated_cropvideo.py
│   │   ├── _deprecated_show.py
│   │   ├── _deprecated_utils.py
│   │   └── _motionhistory.py
│   ├── documentation
│   │   └── figures
│   │       ├── logos
│   │       │   ├── RITMO_150px.png
│   │       │   └── UiO_150px.png
│   │       └── promo
│   │           ├── ipython_example.gif
│   │           ├── mgt-python-promo.odg
│   │           ├── mgt-python-promo_wide-crop.jpg
│   │           ├── mgt-python.png
│   │           ├── mgt-python_640.jpg
│   │           ├── mgt-python_new.png
│   │           ├── mgt-python_new_640.png
│   │           ├── notebook-middle.png
│   │           └── notebook-middle_150.jpg
│   ├── examples
│   │   ├── Filtering.ipynb
│   │   ├── dancer.avi
│   │   ├── pianist.avi
│   │   ├── test_dance.py
│   │   └── test_pianist.py
│   ├── models
│   │   └── centerface.onnx
│   └── pose
│       ├── body_25
│       │   └── pose_deploy.prototxt
│       ├── coco
│       │   └── pose_deploy_linevec.prototxt
│       ├── getBODY_25_here.bat
│       ├── getBODY_25_here.sh
│       ├── getBODY_25_remote.bat
│       ├── getBODY_25_remote.sh
│       ├── getBODY_25_remote_colab.sh
│       ├── getCOCO_here.bat
│       ├── getCOCO_here.sh
│       ├── getCOCO_remote.bat
│       ├── getCOCO_remote.sh
│       ├── getCOCO_remote_colab.sh
│       ├── getMPI_here.bat
│       ├── getMPI_here.sh
│       ├── getMPI_remote.bat
│       ├── getMPI_remote.sh
│       ├── getMPI_remote_colab.sh
│       └── mpi
│           ├── pose_deploy_linevec.prototxt
│           └── pose_deploy_linevec_faster_4_stages.prototxt
├── paper
│   ├── figures
│   │   ├── keyframe-image.jpg
│   │   └── motiongram.jpg
│   ├── paper.bib
│   └── paper.md
├── presentation
│   ├── MusicalGesturesToolbox.html
│   └── README.md
├── setup.cfg
├── setup.py
├── tests
│   ├── test_audio.py
│   ├── test_average.py
│   ├── test_centroid.py
│   ├── test_init.py
│   ├── test_motionvideo.py
│   ├── test_ssm.py
│   ├── test_utils.py
│   └── test_videograms.py
└── wiki_pics
    ├── average_example.png
    ├── blur_faces.gif
    ├── centroid-of-motion_640.jpg
    ├── chroma_ssm.png
    ├── colored_waveform.png
    ├── digital-video.png
    ├── directogram.png
    ├── filtering_filter02_mgx.png
    ├── filtering_filter02_mgy.png
    ├── filtering_filter10_mgx.png
    ├── filtering_filter10_mgy.png
    ├── filtering_filter50_mgx.png
    ├── filtering_filter50_mgy.png
    ├── filtering_nofilter_mgx.png
    ├── filtering_nofilter_mgy.png
    ├── flow_dense_example.gif
    ├── flow_sparse_example.png
    ├── heatmap_faces.png
    ├── history_example.png
    ├── hpss.png
    ├── impact_detection.png
    ├── impact_envelopes.png
    ├── ipb.png
    ├── motion-image_640.jpg
    ├── motion_average_example.png
    ├── motion_plots.png
    ├── motion_ssm.png
    ├── motiongram_640.jpg
    ├── motionhistory_back_and_forth.gif
    ├── pianist_descriptors.png
    ├── pianist_mgy.png
    ├── pianist_spectrogram.png
    ├── pianist_stacked_figures.png
    ├── pianist_tempogram.png
    ├── pianist_vgy.png
    ├── pianist_waveform.png
    ├── pose_example.png
    ├── quantity-of-motion_640.jpg
    ├── subtracted.gif
    ├── velocity.png
    ├── video_info_320.png
    ├── videogrid_example.png
    └── warp_curve.png
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - master
7 |   pull_request:
8 |     branches:
9 |       - master
10 |
11 | jobs:
12 |   build:
13 |     name: "Python ${{ matrix.python-version }} on ${{ matrix.os }}"
14 |     runs-on: ${{ matrix.os }}
15 |
16 |     strategy:
17 |       fail-fast: false
18 |       matrix:
19 |         os: [ubuntu-latest, macos-latest, windows-latest]
20 |         python-version: ["3.8","3.9","3.10"]
21 |
22 |     steps:
23 |       - uses: actions/checkout@v3
24 |
25 |       - name: Install FFmpeg and FFprobe
26 |         uses: FedericoCarboni/setup-ffmpeg@v2
27 |         id: setup-ffmpeg
28 |
29 |       - name: Install Conda environment
30 |         uses: conda-incubator/setup-miniconda@v3
31 |         with:
32 |           auto-update-conda: true
33 |           python-version: ${{ matrix.python-version }}
34 |
35 |       - name: Conda info
36 |         shell: bash -l {0}
37 |         run: |
38 |           conda info -a
39 |           conda list
40 |
41 |       - name: Install musicalgestures
42 |         shell: bash -l {0}
43 |         run: python -m pip install musicalgestures
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Run Test with Pytest
2 |
3 | on: [push]
4 |
5 | jobs:
6 |   build:
7 |     runs-on: ubuntu-latest
8 |     strategy:
9 |       matrix:
10 |         python-version: ["3.10"]
11 |
12 |     steps:
13 |       - uses: actions/checkout@v3
14 |
15 |       - name: Update documentation files
16 |         run: |
17 |           pip install handsdown==1.1.0
18 |           handsdown --external https://github.com/fourMs/MGT-python --branch master
19 |
20 |       - uses: stefanzweifel/git-auto-commit-action@v2.3.0
21 |         with:
22 |           commit_message: Update documentation
23 |           branch: ${{ github.ref }}
24 |         env:
25 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
26 |
27 |       - name: Install FFmpeg and FFprobe
28 |         uses: FedericoCarboni/setup-ffmpeg@v2
29 |         id: setup-ffmpeg
30 |
31 |       - name: Set up Python ${{ matrix.python-version }}
32 |         uses: actions/setup-python@v4
33 |         with:
34 |           python-version: ${{ matrix.python-version }}
35 |
36 |       - name: Install musicalgestures
37 |         shell: bash -l {0}
38 |         run: python -m pip install musicalgestures
39 |
40 |       - name: Lint with Ruff
41 |         run: |
42 |           pip install ruff
43 |           ruff check --output-format=github .
44 |         continue-on-error: true
45 |
46 |       ########## TODO ############
47 |       # - name: Test with pytest
48 |       #   shell: bash -l {0}
49 |       #   run: |
50 |       #     pip install pytest
51 |       #     python -m pytest -v
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 |
3 | *.mp4
4 | *.csv
5 | *.eps
6 | *.tiff
7 | *.bmp
8 | *.DS_Store
9 | *.aux
10 | *.fdb_latexmk
11 | *.fls
12 | *.log
13 | *.synctex.gz
14 | *.txt
15 | *.tff
16 | *.tsv
17 | .ipynb_checkpoints
18 | */.ipynb_checkpoints/*
19 | fix_notebook.py
20 | read_readme.py
21 | dist
22 | build
23 | install_test_venv
24 | musicalgestures.egg-info
25 | test-videos
26 | .vscode/settings.json
27 | dance.avi
28 | musicalgestures/pose/mpi/pose_iter_160000.caffemodel
29 | musicalgestures/pose/coco/pose_iter_440000.caffemodel
30 | MusicalGesturesToolbox_devlocal.ipynb
31 | musicalgestures/interactive.py
32 | musicalgestures/pytest.ini
33 | pytest.ini
34 | tests/.coverage
35 | tests/.vs/slnx.sqlite
36 | tests/pytest.ini
37 | tests/htmlcov
38 | musicalgestures/pose/body_25/pose_iter_584000.caffemodel
39 | musicalgestures/pose/body_25/.wget-hsts
40 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the version of Python and other tools you might need
9 | build:
10 |   os: ubuntu-22.04
11 |   tools:
12 |     python: "3.11"
13 |
14 | # Build documentation with MkDocs
15 | mkdocs:
16 |   configuration: "mkdocs.yml"
17 |   fail_on_warning: false
18 |
19 | # Build all formats
20 | formats: all
21 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include setup.cfg
3 | graft musicalgestures
4 | prune musicalgestures/deprecated
5 | prune musicalgestures/documentation
6 | prune musicalgestures/.ipynb_checkpoints
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MGT-python
2 | [](https://pypi.org/project/musicalgestures)
3 | [](https://github.com/fourMs/MGT-python/blob/master/LICENSE)
4 | [](https://github.com/fourMs/MGT-python/actions/workflows/ci.yml)
5 | [](https://mgt-python.readthedocs.io/en/latest/?badge=latest)
6 |
7 | The Musical Gestures Toolbox for Python is a collection of tools for visualization and analysis of audio and video.
8 |
9 | 
10 |
11 | ## Usage
12 |
13 | The easiest way to get started is to take a look at the Jupyter notebook [MusicalGesturesToolbox](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/MusicalGesturesToolbox.ipynb), which shows examples of how to use the toolbox.
14 |
15 | [](https://colab.research.google.com/github/fourMs/MGT-python/blob/master/musicalgestures/MusicalGesturesToolbox.ipynb)
16 |
17 | The standard installation is via `pip`: paste and execute the following command in the Terminal (macOS, Linux) or PowerShell (Windows):
18 |
19 | `pip install musicalgestures`
20 |
21 | MGT is developed in Python 3 and relies on `FFmpeg` and `OpenCV`. See the [wiki documentation](https://github.com/fourMs/MGT-python/wiki#installation) for more details on the installation process.
22 |
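A minimal first example, assuming a local video file called `dancer.avi` (a copy is bundled in `musicalgestures/examples`) and the `motion()` method provided by the `_motionvideo` module:

```python
import musicalgestures

# Load a video; trimming arguments such as starttime/endtime are optional.
mv = musicalgestures.MgVideo('dancer.avi', starttime=0, endtime=10)

# Generate a motion video and related motion data for the excerpt.
mv.motion()
```
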
23 | ## Description
24 |
25 | Watch a 10-minute introduction to the toolbox:
26 |
27 | [](https://youtu.be/tZVX_lDFrwc)
28 |
29 | MGT can generate both dynamic and static visualizations of video files, including motion videos, history videos, average images, motiongrams, and videograms. It can also extract various features from video files, including the quantity, centroid, and area of motion. The toolbox also integrates well with other libraries, such as OpenPose for skeleton tracking, and Librosa for audio analysis. All the features are described in the [wiki documentation](https://github.com/fourMs/MGT-python/wiki).
30 |
31 |
32 | ## History
33 |
34 | This toolbox builds on the [Musical Gestures Toolbox for Matlab](https://github.com/fourMs/MGT-matlab/), which in turn builds on the [Musical Gestures Toolbox for Max](https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-max/).
35 |
36 | The software is currently maintained by the [fourMs lab](https://github.com/fourMs) at [RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion](https://www.uio.no/ritmo/english/) at the University of Oslo.
37 |
38 | ## Reference
39 |
40 | If you use this toolbox in your research, please cite this article:
41 |
42 | - Laczkó, B., & Jensenius, A. R. (2021). [Reflections on the Development of the Musical Gestures Toolbox for Python](https://www.duo.uio.no/bitstream/handle/10852/89331/Laczk%25C3%25B3_et_al_2021_Reflections_on_the_Development_of_the.pdf?sequence=2&isAllowed=y). *Proceedings of the Nordic Sound and Music Computing Conference*, Copenhagen.
43 |
44 |
45 | ## Credits
46 |
47 | Developers: [Balint Laczko](https://github.com/balintlaczko), [Joachim Poutaraud](https://github.com/joachimpoutaraud), [Frida Furmyr](https://github.com/fridafu), [Marcus Widmer](https://github.com/marcuswidmer), [Alexander Refsum Jensenius](https://github.com/alexarje/)
48 |
49 | ## License
50 |
51 | This toolbox is released under the [GNU General Public License 3.0 license](https://www.gnu.org/licenses/gpl-3.0.en.html).
52 |
--------------------------------------------------------------------------------
/docs/MODULES.md:
--------------------------------------------------------------------------------
1 | # Mgt-python Modules
2 |
3 | > Auto-generated documentation modules index.
4 |
5 | Full list of [Mgt-python](README.md#mgt-python) project modules.
6 |
7 | - [MGT-python](README.md#mgt-python)
8 | - [360video](musicalgestures/_360video.md#360video)
9 | - [Musicalgestures](musicalgestures/index.md#musicalgestures)
10 | - [Audio](musicalgestures/_audio.md#audio)
11 | - [Blend](musicalgestures/_blend.md#blend)
12 | - [Blurfaces](musicalgestures/_blurfaces.md#blurfaces)
13 | - [CenterFace](musicalgestures/_centerface.md#centerface)
- [Centroid](musicalgestures/_centroid.md#centroid)
14 | - [Colored](musicalgestures/_colored.md#colored)
15 | - [Cropping Window](musicalgestures/_cropping_window.md#cropping-window)
16 | - [Cropvideo](musicalgestures/_cropvideo.md#cropvideo)
17 | - [Directograms](musicalgestures/_directograms.md#directograms)
18 | - [Filter](musicalgestures/_filter.md#filter)
19 | - [Flow](musicalgestures/_flow.md#flow)
20 | - [Grid](musicalgestures/_grid.md#grid)
21 | - [History](musicalgestures/_history.md#history)
22 | - [Impacts](musicalgestures/_impacts.md#impacts)
23 | - [Info](musicalgestures/_info.md#info)
24 | - [Input Test](musicalgestures/_input_test.md#input-test)
25 | - [MgList](musicalgestures/_mglist.md#mglist)
26 | - [Motionanalysis](musicalgestures/_motionanalysis.md#motionanalysis)
27 | - [Motionvideo](musicalgestures/_motionvideo.md#motionvideo)
28 | - [Motionvideo Mp Render](musicalgestures/_motionvideo_mp_render.md#motionvideo-mp-render)
29 | - [Motionvideo Mp Run](musicalgestures/_motionvideo_mp_run.md#motionvideo-mp-run)
30 | - [Pose](musicalgestures/_pose.md#pose)
31 | - [Show](musicalgestures/_show.md#show)
32 | - [Show Window](musicalgestures/_show_window.md#show-window)
33 | - [Ssm](musicalgestures/_ssm.md#ssm)
34 | - [Subtract](musicalgestures/_subtract.md#subtract)
35 | - [Utils](musicalgestures/_utils.md#utils)
36 | - [Video](musicalgestures/_video.md#video)
37 | - [Videoadjust](musicalgestures/_videoadjust.md#videoadjust)
38 | - [Videograms](musicalgestures/_videograms.md#videograms)
39 | - [Videoreader](musicalgestures/_videoreader.md#videoreader)
40 | - [Warp](musicalgestures/_warp.md#warp)
41 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # MGT-python
2 |
3 | > Auto-generated documentation index.
4 |
5 | [](https://pypi.org/project/musicalgestures)
6 | [](https://github.com/fourMs/MGT-python/blob/master/LICENSE)
7 | [](https://github.com/fourMs/MGT-python/actions/workflows/ci.yml)
8 | [](https://mgt-python.readthedocs.io/en/latest/?badge=latest)
9 |
10 | Full Mgt-python project documentation can be found in [Modules](MODULES.md#mgt-python-modules)
11 |
12 | - [MGT-python](#mgt-python)
13 | - [Usage](#usage)
14 | - [Description](#description)
15 | - [History](#history)
16 | - [Reference](#reference)
17 | - [Credits](#credits)
18 | - [License](#license)
19 | - [Mgt-python Modules](MODULES.md#mgt-python-modules)
20 |
21 | The Musical Gestures Toolbox for Python is a collection of tools for visualization and analysis of audio and video.
22 |
23 | 
24 |
25 | ## Usage
26 |
27 | The easiest way to get started is to take a look at the Jupyter notebook [MusicalGesturesToolbox](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/MusicalGesturesToolbox.ipynb), which shows examples of how to use the toolbox.
28 |
29 | [](https://colab.research.google.com/github/fourMs/MGT-python/blob/master/musicalgestures/MusicalGesturesToolbox.ipynb)
30 |
31 | The standard installation is via `pip`: paste and execute the following command in the Terminal (macOS, Linux) or PowerShell (Windows):
32 |
33 | `pip install musicalgestures`
34 |
35 | MGT is developed in Python 3 and relies on `FFmpeg` and `OpenCV`. See the [wiki documentation](https://github.com/fourMs/MGT-python/wiki#installation) for more details on the installation process.
36 |
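A minimal first example, assuming a local video file called `dancer.avi` (a copy is bundled in `musicalgestures/examples`) and the `motion()` method provided by the `_motionvideo` module:

```python
import musicalgestures

# Load a video; trimming arguments such as starttime/endtime are optional.
mv = musicalgestures.MgVideo('dancer.avi', starttime=0, endtime=10)

# Generate a motion video and related motion data for the excerpt.
mv.motion()
```
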
37 | ## Description
38 |
39 | Watch a 10-minute introduction to the toolbox:
40 |
41 | [](https://youtu.be/tZVX_lDFrwc)
42 |
43 | MGT can generate both dynamic and static visualizations of video files, including motion videos, history videos, average images, motiongrams, and videograms. It can also extract various features from video files, including the quantity, centroid, and area of motion. The toolbox also integrates well with other libraries, such as OpenPose for skeleton tracking, and Librosa for audio analysis. All the features are described in the [wiki documentation](https://github.com/fourMs/MGT-python/wiki).
44 |
45 | ## History
46 |
47 | This toolbox builds on the [Musical Gestures Toolbox for Matlab](https://github.com/fourMs/MGT-matlab/), which in turn builds on the [Musical Gestures Toolbox for Max](https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-max/).
48 |
49 | The software is currently maintained by the [fourMs lab](https://github.com/fourMs) at [RITMO Centre for Interdisciplinary Studies in Rhythm, Time and Motion](https://www.uio.no/ritmo/english/) at the University of Oslo.
50 |
51 | ## Reference
52 |
53 | If you use this toolbox in your research, please cite this article:
54 |
55 | - Laczkó, B., & Jensenius, A. R. (2021). [Reflections on the Development of the Musical Gestures Toolbox for Python](https://www.duo.uio.no/bitstream/handle/10852/89331/Laczk%25C3%25B3_et_al_2021_Reflections_on_the_Development_of_the.pdf?sequence=2&isAllowed=y). *Proceedings of the Nordic Sound and Music Computing Conference*, Copenhagen.
56 |
57 | ## Credits
58 |
59 | Developers: [Balint Laczko](https://github.com/balintlaczko), [Joachim Poutaraud](https://github.com/joachimpoutaraud), [Frida Furmyr](https://github.com/fridafu), [Marcus Widmer](https://github.com/marcuswidmer), [Alexander Refsum Jensenius](https://github.com/alexarje/)
60 |
61 | ## License
62 |
63 | This toolbox is released under the [GNU General Public License 3.0 license](https://www.gnu.org/licenses/gpl-3.0.en.html).
64 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
2 | show_downloads: true
3 | source: "https://github.com/fourMs/MGT-python/"
4 | highlighter: rouge
5 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_360video.md:
--------------------------------------------------------------------------------
1 | # 360video
2 |
3 | > Auto-generated documentation for [musicalgestures._360video](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_360video.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / 360video
6 | - [Mg360Video](#mg360video)
7 | - [Mg360Video().convert_projection](#mg360videoconvert_projection)
8 | - [Projection](#projection)
9 |
10 | ## Mg360Video
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_360video.py#L90)
13 |
14 | ```python
15 | class Mg360Video(MgVideo):
16 | def __init__(
17 | filename: str,
18 | projection: str | Projection,
19 | camera: str = None,
20 | **kwargs,
21 | ):
22 | ```
23 |
24 | Class for 360 videos.
25 |
26 | #### See also
27 |
28 | - [MgVideo](_video.md#mgvideo)
29 |
30 | ### Mg360Video().convert_projection
31 |
32 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_360video.py#L122)
33 |
34 | ```python
35 | def convert_projection(
36 | target_projection: Projection | str,
37 | options: dict[str, str] = None,
38 | print_cmd: bool = False,
39 | ):
40 | ```
41 |
42 | Convert the video to a different projection.
43 |
44 | #### Arguments
45 |
46 | - `target_projection` *Projection* - Target projection.
47 | - `options` *dict[str, str], optional* - Options for the conversion. Defaults to None.
48 | - `print_cmd` *bool, optional* - Print the ffmpeg command. Defaults to False.
49 |
50 | ## Projection
51 |
52 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_360video.py#L8)
53 |
54 | ```python
55 | class Projection(Enum):
56 | ```
57 |
58 | Projection types for 360 video, same as in the FFmpeg v360 filter: https://ffmpeg.org/ffmpeg-filters.html#v360.
59 |
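A minimal usage sketch. The file name is hypothetical, and the projection strings are assumed to follow the FFmpeg v360 names linked above:

```python
import musicalgestures
from musicalgestures._360video import Mg360Video

# 'concert_360.mp4' is a hypothetical equirectangular 360-degree recording.
v = Mg360Video('concert_360.mp4', projection='equirectangular')

# Re-project to a flat (rectilinear) view and print the ffmpeg command used.
v.convert_projection('flat', print_cmd=True)
```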
--------------------------------------------------------------------------------
/docs/musicalgestures/_blend.md:
--------------------------------------------------------------------------------
1 | # Blend
2 |
3 | > Auto-generated documentation for [musicalgestures._blend](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blend.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Blend
6 | - [mg_blend_image](#mg_blend_image)
7 |
8 | ## mg_blend_image
9 |
10 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blend.py#L7)
11 |
12 | ```python
13 | def mg_blend_image(
14 | self,
15 | filename=None,
16 | mode='all_mode',
17 | component_mode='average',
18 | target_name=None,
19 | overwrite=False,
20 | ):
21 | ```
22 |
23 | Finds and saves a blended image of an input video file using FFmpeg.
24 | The FFmpeg tblend (time blend) filter takes two consecutive frames from a single stream and outputs the result obtained by blending the new frame on top of the old one.
25 |
26 | #### Arguments
27 |
28 | - `filename` *str, optional* - Path to the input video file. If None, the video file of the MgObject is used. Defaults to None.
29 | - `mode` *str, optional* - Set blend mode for specific pixel component or all pixel components. Accepted options are 'c0_mode', 'c1_mode', c2_mode', 'c3_mode' and 'all_mode'. Defaults to 'all_mode'.
30 | - `component_mode` *str, optional* - Component mode of the FFmpeg tblend. Available values for component modes can be accessed here: https://ffmpeg.org/ffmpeg-filters.html#blend-1. Defaults to 'average'.
31 | - `target_name` *str, optional* - The name of the output video. Defaults to None (which assumes that the input filename with the component mode suffix should be used).
32 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
33 |
34 | #### Returns
35 |
36 | - `MgImage` - A new MgImage pointing to the output image file.
37 |
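A minimal usage sketch of [mg_blend_image](#mg_blend_image). The file name is hypothetical; the module-level function takes an MgVideo as its first argument:

```python
import musicalgestures
from musicalgestures._blend import mg_blend_image

mv = musicalgestures.MgVideo('dancer.avi')  # hypothetical input video

# Time-blend consecutive frames into one image with FFmpeg's tblend filter.
blend = mg_blend_image(mv, component_mode='average')  # returns an MgImage
```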
--------------------------------------------------------------------------------
/docs/musicalgestures/_blurfaces.md:
--------------------------------------------------------------------------------
1 | # Blurfaces
2 |
3 | > Auto-generated documentation for [musicalgestures._blurfaces](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blurfaces.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Blurfaces
6 | - [centroid_mask](#centroid_mask)
7 | - [heatmap_data](#heatmap_data)
8 | - [mg_blurfaces](#mg_blurfaces)
9 | - [nearest_neighbours](#nearest_neighbours)
10 | - [scaling_mask](#scaling_mask)
11 |
12 | ## centroid_mask
13 |
14 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blurfaces.py#L40)
15 |
16 | ```python
17 | def centroid_mask(data):
18 | ```
19 |
20 | ## heatmap_data
21 |
22 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blurfaces.py#L51)
23 |
24 | ```python
25 | def heatmap_data(data, resolution, data_min, data_max):
26 | ```
27 |
28 | ## mg_blurfaces
29 |
30 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blurfaces.py#L71)
31 |
32 | ```python
33 | def mg_blurfaces(
34 | self,
35 | mask='blur',
36 | mask_image=None,
37 | mask_scale=1.0,
38 | ellipse=True,
39 | draw_heatmap=False,
40 | neighbours=32,
41 | resolution=250,
42 | draw_scores=False,
43 | save_data=True,
44 | data_format='csv',
45 | color=(0, 0, 0),
46 | target_name=None,
47 | overwrite=False,
48 | ):
49 | ```
50 |
51 | Automatic anonymization of faces in videos.
52 | This function works by first detecting all human faces in each video frame and then applying an anonymization filter
53 | (blurring, black rectangles or images) on each detected face region.
54 |
55 | Credits: `centerface.onnx` (original) and `centerface.py` are based on https://github.com/Star-Clouds/centerface (revision 8c39a49), released under [MIT license](https://github.com/Star-Clouds/CenterFace/blob/36afed/LICENSE).
56 |
57 | #### Arguments
58 |
59 | - `mask` *str, optional* - Mask filter mode for face regions. 'blur' applies a strong Gaussian blur, 'rectangle' draws a solid black box, 'image' replaces the face with a custom image, and 'none' leaves the input unchanged. Defaults to 'blur'.
60 | - `mask_image` *str, optional* - Anonymization image path which can be used for masking face regions. This can be activated by specifying 'image' in the mask parameter. Defaults to None.
61 | - `mask_scale` *float, optional* - Scale factor for face masks, to make sure that the masks cover the complete face. Defaults to 1.0.
62 | - `ellipse` *bool, optional* - Mask faces with blurred ellipses. Defaults to True.
63 | - `draw_heatmap` *bool, optional* - Draw heatmap of the detected faces using the centroid of the face mask. Defaults to False.
64 | - `neighbours` *int, optional* - Number of neighbours for smoothing the heatmap image. Defaults to 32.
65 | - `resolution` *int, optional* - Number of pixel resolution for the heatmap visualization. Defaults to 250.
66 | - `draw_scores` *bool, optional* - Draw detection faceness scores onto outputs (a score between 0 and 1 that roughly corresponds to the detector's confidence that something is a face). Defaults to False.
67 | - `save_data` *bool, optional* - Whether to save the scaled coordinates of the face mask (time (ms), x1, y1, x2, y2) for each frame to a file. Defaults to True.
68 | - `data_format` *str, optional* - Specifies format of blur_faces-data. Accepted values are 'csv', 'tsv' and 'txt'. For multiple output formats, use list, e.g. ['csv', 'txt']. Defaults to 'csv'.
69 | - `color` *tuple, optional* - Customized color of the rectangle boxes. Defaults to black (0, 0, 0).
70 | - `target_name` *str, optional* - Target output name. Defaults to None (which assumes that the input filename with the suffix "_blurred" should be used).
71 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
72 |
73 | #### Returns
74 |
75 | - `MgVideo` - An MgVideo with the blurred faces, stored as `blur_faces` on the parent MgVideo.
76 |
77 | ## nearest_neighbours
78 |
79 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blurfaces.py#L56)
80 |
81 | ```python
82 | def nearest_neighbours(x, y, width, height, resolution, n_neighbours):
83 | ```
84 |
85 | ## scaling_mask
86 |
87 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_blurfaces.py#L18)
88 |
89 | ```python
90 | def scaling_mask(x1, y1, x2, y2, mask_scale=1.0):
91 | ```
92 |
93 | Scale factor for face masks, to make sure that the masks cover the complete face.
94 |
95 | #### Arguments
96 |
97 | - `x1` *int* - X start coordinate value
98 | - `y1` *int* - Y start coordinate value
99 | - `x2` *int* - X end coordinate value
100 | - `y2` *int* - Y end coordinate value
101 | - `mask_scale` *float, optional* - Scale factor for adjusting the size of the face masks. Defaults to 1.0.
102 |
103 | #### Returns
104 |
105 | [x1, y1, x2, y2]: A list of integers corresponding to the scaled coordinates of the face masks.
106 |
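A minimal usage sketch of [mg_blurfaces](#mg_blurfaces). The file name is hypothetical; the module-level function takes an MgVideo as its first argument:

```python
import musicalgestures
from musicalgestures._blurfaces import mg_blurfaces

mv = musicalgestures.MgVideo('interview.mp4')  # hypothetical input video

# Blur all detected faces, draw a heatmap of face positions,
# and save the scaled mask coordinates per frame as CSV.
blurred = mg_blurfaces(mv, mask='blur', draw_heatmap=True,
                       save_data=True, data_format='csv')
```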
--------------------------------------------------------------------------------
/docs/musicalgestures/_centerface.md:
--------------------------------------------------------------------------------
1 | # CenterFace
2 |
3 | > Auto-generated documentation for [musicalgestures._centerface](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / CenterFace
6 | - [CenterFace](#centerface)
7 | - [CenterFace().decode](#centerfacedecode)
8 | - [CenterFace().inference_opencv](#centerfaceinference_opencv)
9 | - [CenterFace().nms](#centerfacenms)
10 | - [CenterFace().postprocess](#centerfacepostprocess)
11 | - [CenterFace().transform](#centerfacetransform)
12 |
13 | ## CenterFace
14 |
15 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py#L7)
16 |
17 | ```python
18 | class CenterFace(object):
19 | def __init__(landmarks=True):
20 | ```
21 |
22 | ### CenterFace().decode
23 |
24 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py#L53)
25 |
26 | ```python
27 | def decode(heatmap, scale, offset, landmark, size, threshold=0.1):
28 | ```
29 |
30 | ### CenterFace().inference_opencv
31 |
32 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py#L21)
33 |
34 | ```python
35 | def inference_opencv(img, threshold):
36 | ```
37 |
38 | ### CenterFace().nms
39 |
40 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py#L87)
41 |
42 | ```python
43 | def nms(boxes, scores, nms_thresh):
44 | ```
45 |
46 | ### CenterFace().postprocess
47 |
48 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py#L35)
49 |
50 | ```python
51 | def postprocess(heatmap, lms, offset, scale, threshold):
52 | ```
53 |
54 | ### CenterFace().transform
55 |
56 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centerface.py#L30)
57 |
58 | ```python
59 | def transform(h, w):
60 | ```
61 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_centroid.md:
--------------------------------------------------------------------------------
1 | # Centroid
2 |
3 | > Auto-generated documentation for [musicalgestures._centroid](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centroid.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Centroid
6 | - [centroid](#centroid)
7 |
8 | ## centroid
9 |
10 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_centroid.py#L5)
11 |
12 | ```python
13 | def centroid(image, width, height):
14 | ```
15 |
16 | Computes the centroid of an image or frame.
17 |
18 | #### Arguments
19 |
20 | - `image` *np.array(uint8)* - The input image matrix for the centroid estimation function.
21 | - `width` *int* - The pixel width of the input video capture.
22 | - `height` *int* - The pixel height of the input video capture.
23 |
24 | #### Returns
25 |
26 | - `np.array(2)` - X and Y coordinates of the centroid of motion.
27 | - `int` - Quantity of motion: How large the change was in pixels.
28 |
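A minimal sketch of [centroid](#centroid) on a synthetic motion image (all values are made up for illustration):

```python
import numpy as np
from musicalgestures._centroid import centroid

width, height = 320, 240
motion_image = np.zeros((height, width), dtype=np.uint8)
motion_image[100:120, 150:170] = 255  # one bright "moving" blob

com, qom = centroid(motion_image, width, height)
print(com)  # X and Y coordinates of the centroid of motion
print(qom)  # quantity of motion: how large the change was in pixels
```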
--------------------------------------------------------------------------------
/docs/musicalgestures/_colored.md:
--------------------------------------------------------------------------------
1 | # Colored
2 |
3 | > Auto-generated documentation for [musicalgestures._colored](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Colored
6 | - [MgAudioProcessor](#mgaudioprocessor)
7 | - [MgAudioProcessor().peaks](#mgaudioprocessorpeaks)
8 | - [MgAudioProcessor().read_samples](#mgaudioprocessorread_samples)
9 | - [MgAudioProcessor().spectral_centroid](#mgaudioprocessorspectral_centroid)
10 | - [MgWaveformImage](#mgwaveformimage)
11 | - [MgWaveformImage().draw_peaks](#mgwaveformimagedraw_peaks)
12 | - [MgWaveformImage().interpolate_colors](#mgwaveformimageinterpolate_colors)
13 | - [min_max_level](#min_max_level)
14 |
15 | ## MgAudioProcessor
16 |
17 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L25)
18 |
19 | ```python
20 | class MgAudioProcessor(object):
21 | def __init__(
22 | filename,
23 | n_fft,
24 | fmin,
25 | fmax=None,
26 | window_function=np.hanning,
27 | ):
28 | ```
29 |
30 | ### MgAudioProcessor().peaks
31 |
32 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L134)
33 |
34 | ```python
35 | def peaks(start_seek, end_seek, block_size=1024):
36 | ```
37 |
38 | Read all samples between start_seek and end_seek, then find the minimum and maximum peak
39 | in that range. Returns that pair in the order they were found. So if min was found first,
40 | it returns (min, max) else the other way around.
41 |
42 | ### MgAudioProcessor().read_samples
43 |
44 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L70)
45 |
46 | ```python
47 | def read_samples(start, size, resize_if_less=False):
48 | ```
49 |
50 | Read `size` samples starting at `start`. If `resize_if_less` is True and fewer than `size`
51 | samples are read, resize the array to `size` and fill the remainder with zeros.
52 |
53 | ### MgAudioProcessor().spectral_centroid
54 |
55 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L118)
56 |
57 | ```python
58 | def spectral_centroid(seek_point):
59 | ```
60 |
61 | Read `n_fft` samples starting at `seek_point` and calculate the spectral centroid.
62 |
63 | ## MgWaveformImage
64 |
65 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L168)
66 |
67 | ```python
68 | class MgWaveformImage(object):
69 | def __init__(image_width=2500, image_height=500, cmap='freesound'):
70 | ```
71 |
72 | ### MgWaveformImage().draw_peaks
73 |
74 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L194)
75 |
76 | ```python
77 | def draw_peaks(x, peaks, spectral_centroid):
78 | ```
79 |
80 | Draw 2 peaks at x using the spectral_centroid for color
81 |
82 | ### MgWaveformImage().interpolate_colors
83 |
84 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L205)
85 |
86 | ```python
87 | def interpolate_colors(colors, flat=False, num_colors=256):
88 | ```
89 |
90 | Given a list of colors, create a larger list of colors by linearly interpolating
91 | between them. If `flat` is True a list of numbers will be returned. If
92 | False, a list of (r,g,b) tuples. `num_colors` is the number of colors wanted
93 | in the final list.
94 |
95 | ## min_max_level
96 |
97 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_colored.py#L11)
98 |
99 | ```python
100 | def min_max_level(filename):
101 | ```
102 |
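A minimal sketch of [MgWaveformImage().interpolate_colors](#mgwaveformimageinterpolate_colors), with arbitrary colors chosen for illustration:

```python
from musicalgestures._colored import MgWaveformImage

wf = MgWaveformImage(image_width=800, image_height=200)

# Interpolate from blue to red in 16 steps; with flat=False the result
# is a list of (r, g, b) tuples.
palette = wf.interpolate_colors([(0, 0, 255), (255, 0, 0)], flat=False, num_colors=16)
```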
--------------------------------------------------------------------------------
/docs/musicalgestures/_cropping_window.md:
--------------------------------------------------------------------------------
1 | # Cropping Window
2 |
3 | > Auto-generated documentation for [musicalgestures._cropping_window](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropping_window.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Cropping Window
6 | - [draw_rectangle](#draw_rectangle)
7 |
8 | ## draw_rectangle
9 |
10 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropping_window.py#L11)
11 |
12 | ```python
13 | def draw_rectangle(event, x, y, flags, param):
14 | ```
15 |
16 | Helper function to render a cropping window to the user in case of manual cropping, using cv2.
17 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_cropvideo.md:
--------------------------------------------------------------------------------
1 | # Cropvideo
2 |
3 | > Auto-generated documentation for [musicalgestures._cropvideo](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Cropvideo
6 | - [async_subprocess](#async_subprocess)
7 | - [cropping_window](#cropping_window)
8 | - [find_motion_box_ffmpeg](#find_motion_box_ffmpeg)
9 | - [manual_text_input](#manual_text_input)
10 | - [mg_cropvideo_ffmpeg](#mg_cropvideo_ffmpeg)
11 | - [run_cropping_window](#run_cropping_window)
12 |
13 | ## async_subprocess
14 |
15 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py#L239)
16 |
17 | ```python
18 | async def async_subprocess(command):
19 | ```
20 |
21 | ## cropping_window
22 |
23 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py#L100)
24 |
25 | ```python
26 | def cropping_window(filename):
27 | ```
28 |
29 | ## find_motion_box_ffmpeg
30 |
31 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py#L9)
32 |
33 | ```python
34 | def find_motion_box_ffmpeg(
35 | filename,
36 | motion_box_thresh=0.1,
37 | motion_box_margin=12,
38 | ):
39 | ```
40 |
41 | Helper function to find the area of motion in a video, using ffmpeg.
42 |
43 | #### Arguments
44 |
45 | - `filename` *str* - Path to the video file.
46 | - `motion_box_thresh` *float, optional* - Pixel threshold to apply to the video before assessing the area of motion. Defaults to 0.1.
47 | - `motion_box_margin` *int, optional* - Margin (in pixels) to add to the detected motion box. Defaults to 12.
48 |
49 | #### Raises
50 |
51 | - `KeyboardInterrupt` - In case we stop the process manually.
52 |
53 | #### Returns
54 |
55 | - `int` - The width of the motion box.
56 | - `int` - The height of the motion box.
57 | - `int` - The X coordinate of the top left corner of the motion box.
58 | - `int` - The Y coordinate of the top left corner of the motion box.
59 |
60 | ## manual_text_input
61 |
62 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py#L282)
63 |
64 | ```python
65 | def manual_text_input():
66 | ```
67 |
68 | Helper function for mg_cropvideo_ffmpeg when its crop_movement is 'manual' but the code is running in Colab.
69 | In this case we can't display the windowed cropping UI, so we ask for the values as a text input.
70 |
71 | #### Returns
72 |
73 | - `list` - x, y, w, h for crop_ffmpeg.
74 |
75 | ## mg_cropvideo_ffmpeg
76 |
77 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py#L172)
78 |
79 | ```python
80 | def mg_cropvideo_ffmpeg(
81 | filename,
82 | crop_movement='Auto',
83 | motion_box_thresh=0.1,
84 | motion_box_margin=12,
85 | target_name=None,
86 | overwrite=False,
87 | ):
88 | ```
89 |
90 | Crops the video using ffmpeg.
91 |
92 | #### Arguments
93 |
94 | - `filename` *str* - Path to the video file.
95 | - `crop_movement` *str, optional* - 'Auto' finds the bounding box that contains the total motion in the video. Motion threshold is given by motion_box_thresh. 'Manual' opens up a simple GUI that is used to crop the video manually by looking at the first frame. Defaults to 'Auto'.
96 | - `motion_box_thresh` *float, optional* - Only meaningful if `crop_movement='Auto'`. Takes floats between 0 and 1, where 0 includes all the motion and 1 includes none. Defaults to 0.1.
97 | - `motion_box_margin` *int, optional* - Only meaningful if `crop_movement='Auto'`. Adds margin to the bounding box. Defaults to 12.
98 | - `target_name` *str, optional* - The name of the output video. Defaults to None (which assumes that the input filename with the suffix "_crop" should be used).
99 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
100 |
101 | #### Returns
102 |
103 | - `str` - Path to the cropped video.
104 |
105 | ## run_cropping_window
106 |
107 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_cropvideo.py#L258)
108 |
109 | ```python
110 | def run_cropping_window(imgpath, scale_ratio, scaled_width, scaled_height):
111 | ```
112 |
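A minimal usage sketch of [mg_cropvideo_ffmpeg](#mg_cropvideo_ffmpeg), using a hypothetical file name:

```python
from musicalgestures._cropvideo import mg_cropvideo_ffmpeg

# Automatically crop away motionless borders, keeping a 12-pixel margin.
cropped = mg_cropvideo_ffmpeg('dancer.avi', crop_movement='Auto',
                              motion_box_thresh=0.1, motion_box_margin=12)
print(cropped)  # path to the cropped video
```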
--------------------------------------------------------------------------------
/docs/musicalgestures/_directograms.md:
--------------------------------------------------------------------------------
1 | # Directograms
2 |
3 | > Auto-generated documentation for [musicalgestures._directograms](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_directograms.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Directograms
6 | - [directogram](#directogram)
7 | - [matrix3D_norm](#matrix3d_norm)
8 | - [mg_directograms](#mg_directograms)
9 |
10 | ## directogram
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_directograms.py#L24)
13 |
14 | ```python
15 | @jit(nopython=True)
16 | def directogram(optical_flow):
17 | ```
18 |
19 | ## matrix3D_norm
20 |
21 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_directograms.py#L14)
22 |
23 | ```python
24 | @jit(nopython=True)
25 | def matrix3D_norm(matrix):
26 | ```
27 |
28 | ## mg_directograms
29 |
30 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_directograms.py#L39)
31 |
32 | ```python
33 | def mg_directograms(
34 | self,
35 | title=None,
36 | filtertype='Adaptative',
37 | thresh=0.05,
38 | kernel_size=5,
39 | target_name=None,
40 | overwrite=False,
41 | ):
42 | ```
43 |
44 | Compute a directogram to factor the magnitude of motion into different angles.
45 | Each column of the directogram is computed as the weighted histogram (HISTOGRAM_BINS) of angles for the optical flow of an input frame.
46 |
47 | Source: Abe Davis -- [Visual Rhythm and Beat](http://www.abedavis.com/files/papers/VisualRhythm_Davis18.pdf) (section 4.1)
48 |
49 | #### Arguments
50 |
51 | - `title` *str, optional* - Optionally add a title to the figure. Defaults to None, which uses 'Directogram' as the title.
52 | - `filtertype` *str, optional* - 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method. 'Adaptative' performs an adaptive threshold as the weighted sum of 11 neighborhood pixels where the weights are a Gaussian window. Defaults to 'Adaptative'.
53 | - `thresh` *float, optional* - Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
54 | - `kernel_size` *int, optional* - Size of structuring element. Defaults to 5.
55 | - `target_name` *str, optional* - Target output name for the directogram. Defaults to None (which assumes that the input filename with the suffix "_dg" should be used).
56 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
57 |
58 | #### Returns
59 |
60 | - `MgFigure` - A MgFigure object referring to the internal figure and its data.
61 |
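A minimal usage sketch of [mg_directograms](#mg_directograms). The file name is hypothetical; the module-level function takes an MgVideo as its first argument:

```python
import musicalgestures
from musicalgestures._directograms import mg_directograms

mv = musicalgestures.MgVideo('dancer.avi')

# Compute and plot the directogram with the default adaptive thresholding.
fig = mg_directograms(mv, filtertype='Adaptative', thresh=0.05, kernel_size=5)
```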
--------------------------------------------------------------------------------
/docs/musicalgestures/_filter.md:
--------------------------------------------------------------------------------
1 | # Filter
2 |
3 | > Auto-generated documentation for [musicalgestures._filter](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_filter.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Filter
6 | - [filter_frame](#filter_frame)
7 | - [filter_frame_ffmpeg](#filter_frame_ffmpeg)
8 |
9 | ## filter_frame
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_filter.py#L8)
12 |
13 | ```python
14 | def filter_frame(motion_frame, filtertype, thresh, kernel_size):
15 | ```
16 |
17 | Applies a threshold filter and then a median filter (of `kernel_size`x`kernel_size`) to an image or video frame.
18 |
19 | #### Arguments
20 |
21 | - `motion_frame` *np.array(uint8)* - Input motion image.
22 | - `filtertype` *str* - 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method.
23 | - `thresh` *float* - A number in the range of 0 to 1. Eliminates pixel values less than given threshold.
24 | - `kernel_size` *int* - Size of structuring element.
25 |
26 | #### Returns
27 |
28 | - `np.array(uint8)` - The filtered frame.
29 |
30 | ## filter_frame_ffmpeg
31 |
32 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_filter.py#L32)
33 |
34 | ```python
35 | def filter_frame_ffmpeg(
36 | filename,
37 | cmd,
38 | color,
39 | blur,
40 | filtertype,
41 | threshold,
42 | kernel_size,
43 | use_median,
44 | invert=False,
45 | ):
46 | ```
47 |
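A minimal sketch of [filter_frame](#filter_frame) on a synthetic frame (values made up for illustration):

```python
import numpy as np
from musicalgestures._filter import filter_frame

# A noisy grayscale frame with values in 0-255.
frame = (np.random.rand(240, 320) * 255).astype(np.uint8)

# Zero out everything below 10% intensity, then median-filter with a 5x5 kernel.
filtered = filter_frame(frame, filtertype='Regular', thresh=0.1, kernel_size=5)
```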
--------------------------------------------------------------------------------
/docs/musicalgestures/_grid.md:
--------------------------------------------------------------------------------
1 | # Grid
2 |
3 | > Auto-generated documentation for [musicalgestures._grid](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_grid.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Grid
6 | - [mg_grid](#mg_grid)
7 |
8 | ## mg_grid
9 |
10 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_grid.py#L6)
11 |
12 | ```python
13 | def mg_grid(
14 | self,
15 | height=300,
16 | rows=3,
17 | cols=3,
18 | padding=0,
19 | margin=0,
20 | target_name=None,
21 | overwrite=False,
22 | return_array=False,
23 | ):
24 | ```
25 |
26 | Generates a frame strip preview (grid image) of the video using ffmpeg.
27 |
28 | #### Arguments
29 |
30 | - `height` *int, optional* - Frame height, width is adjusted automatically to keep the correct aspect ratio. Defaults to 300.
31 | - `rows` *int, optional* - Number of rows of the grid. Defaults to 3.
32 | - `cols` *int, optional* - Number of columns of the grid. Defaults to 3.
33 | - `padding` *int, optional* - Padding size between the frames. Defaults to 0.
34 | - `margin` *int, optional* - Margin size for the grid. Defaults to 0.
35 | - `target_name` *str, optional* - Target output name for the grid image. Defaults to None.
36 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
37 | - `return_array` *bool, optional* - Whether to return an array or not. If set to False the function writes the grid image to disk. Defaults to False.
38 |
39 | #### Returns
40 |
41 | - `MgImage` - An MgImage object referring to the internal grid image.
42 |
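A minimal usage sketch of [mg_grid](#mg_grid), using a hypothetical file name:

```python
import musicalgestures
from musicalgestures._grid import mg_grid

mv = musicalgestures.MgVideo('dancer.avi')

# Render a 3x3 grid of frames, each 300 pixels tall; returns an MgImage.
grid = mg_grid(mv, height=300, rows=3, cols=3)
```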
--------------------------------------------------------------------------------
/docs/musicalgestures/_history.md:
--------------------------------------------------------------------------------
1 | # History
2 |
3 | > Auto-generated documentation for [musicalgestures._history](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_history.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / History
6 | - [ParameterError](#parametererror)
7 | - [history_cv2](#history_cv2)
8 | - [history_ffmpeg](#history_ffmpeg)
9 |
10 | ## ParameterError
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_history.py#L8)
13 |
14 | ```python
15 | class ParameterError(Exception):
16 | ```
17 |
18 | Base class for argument errors.
19 |
20 | ## history_cv2
21 |
22 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_history.py#L110)
23 |
24 | ```python
25 | def history_cv2(
26 | self,
27 | filename=None,
28 | history_length=10,
29 | weights=1,
30 | target_name=None,
31 | overwrite=False,
32 | ):
33 | ```
34 |
35 | This function creates a video where each frame is the average of the n previous frames, where n is determined by `history_length`. The history frames are summed up and normalized, and added to the current frame to show the history. Uses cv2.
36 |
37 | #### Arguments
38 |
39 | - `filename` *str, optional* - Path to the input video file. If None, the video file of the MgVideo is used. Defaults to None.
40 | - `history_length` *int, optional* - Number of frames to be saved in the history tail. Defaults to 10.
41 | - `weights` *int/float/list, optional* - Defines the weight or weights applied to the frames in the history tail. If given as list the first element in the list will correspond to the weight of the newest frame in the tail. Defaults to 1.
42 | - `target_name` *str, optional* - Target output name for the video. Defaults to None (which assumes that the input filename with the suffix "_history" should be used).
43 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
44 |
45 | #### Returns
46 |
47 | - `MgVideo` - A new MgVideo pointing to the output video file.
48 |
49 | ## history_ffmpeg
50 |
51 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_history.py#L13)
52 |
53 | ```python
54 | def history_ffmpeg(
55 | self,
56 | filename=None,
57 | history_length=10,
58 | weights=1,
59 | normalize=False,
60 | norm_strength=1,
61 | norm_smooth=0,
62 | target_name=None,
63 | overwrite=False,
64 | ):
65 | ```
66 |
67 | This function creates a video where each frame is the average of the n previous frames, where n is determined by `history_length`. The history frames are summed up and normalized, and added to the current frame to show the history. Uses ffmpeg.
68 |
69 | #### Arguments
70 |
71 | - `filename` *str, optional* - Path to the input video file. If None, the video file of the MgVideo is used. Defaults to None.
72 | - `history_length` *int, optional* - Number of frames to be saved in the history tail. Defaults to 10.
73 | - `weights` *int/float/list/str, optional* - Defines the weight or weights applied to the frames in the history tail. If given as list the first element in the list will correspond to the weight of the newest frame in the tail. If given as a str - like "3 1.2 1" - it will be automatically converted to a list - like [3, 1.2, 1]. Defaults to 1.
74 | - `normalize` *bool, optional* - If True, the history video will be normalized. This can be useful when processing motion (frame difference) videos. Defaults to False.
75 | - `norm_strength` *int/float, optional* - Defines the strength of the normalization where 1 represents full strength. Defaults to 1.
76 | - `norm_smooth` *int, optional* - Defines the number of previous frames to use for temporal smoothing. The input range of each channel is smoothed using a rolling average over the current frame and the `norm_smooth` previous frames. Defaults to 0.
77 | - `target_name` *str, optional* - Target output name for the video. Defaults to None (which assumes that the input filename with the suffix "_history" should be used).
78 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
79 |
80 | #### Returns
81 |
82 | - `MgVideo` - A new MgVideo pointing to the output video file.
83 |
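A minimal usage sketch of [history_ffmpeg](#history_ffmpeg), using a hypothetical file name:

```python
import musicalgestures
from musicalgestures._history import history_ffmpeg

mv = musicalgestures.MgVideo('dancer.avi')

# A 3-frame history tail where the newest frame is weighted strongest.
history = history_ffmpeg(mv, history_length=3, weights=[3, 1.2, 1])
```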
--------------------------------------------------------------------------------
/docs/musicalgestures/_impacts.md:
--------------------------------------------------------------------------------
1 | # Impacts
2 |
3 | > Auto-generated documentation for [musicalgestures._impacts](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_impacts.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Impacts
6 | - [impact_detection](#impact_detection)
7 | - [impact_envelope](#impact_envelope)
8 | - [mg_impacts](#mg_impacts)
9 |
10 | ## impact_detection
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_impacts.py#L29)
13 |
14 | ```python
15 | @jit(nopython=True)
16 | def impact_detection(envelopes, time, fps, local_mean=0.1, local_maxima=0.15):
17 | ```
18 |
19 | ## impact_envelope
20 |
21 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_impacts.py#L12)
22 |
23 | ```python
24 | def impact_envelope(directogram, kernel_size=5):
25 | ```
26 |
27 | ## mg_impacts
28 |
29 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_impacts.py#L50)
30 |
31 | ```python
32 | def mg_impacts(
33 | self,
34 | title=None,
35 | detection=True,
36 | local_mean=0.1,
37 | local_maxima=0.15,
38 | filtertype='Adaptative',
39 | thresh=0.05,
40 | kernel_size=5,
41 | target_name=None,
42 | overwrite=False,
43 | ):
44 | ```
45 |
46 | Compute a visual analogue of an onset envelope, also known as an impact envelope (Abe Davis).
47 | This is computed by summing over positive entries in the columns of the directogram. This gives an impact envelope with precisely the same
48 | form as an onset envelope. To account for large outlying spikes that sometimes happen at shot boundaries (i.e., cuts), the 99th percentile
49 | of the impact envelope values is clipped to the 98th percentile. Then, the impact envelopes are normalized by their maximum to make calculations
50 | more consistent across video resolutions. Finally, the local mean of the impact envelopes is calculated using a 0.1-second window, and local maxima
51 | using a 0.15-second window. Impacts are defined as local maxima that are above their local mean by at least 10% of the envelope’s global maximum.
52 |
53 | Source: Abe Davis -- [Visual Rhythm and Beat](http://www.abedavis.com/files/papers/VisualRhythm_Davis18.pdf) (section 4.2 and 4.3)
54 |
55 | #### Arguments
56 |
57 | - `title` *str, optional* - Optionally add a title to the figure. Defaults to None, which uses 'Directogram' as the title.
58 | - `detection` *bool, optional* - Whether to allow the detection of impacts based on local mean and local maxima or not. Defaults to True.
59 | - `local_mean` *float, optional* - Size of the local mean window in seconds, which reduces the amount of intensity variation between one impact and the next. Defaults to 0.1.
60 | - `local_maxima` *float, optional* - Size of the local maxima window in seconds for the impact envelopes. Defaults to 0.15.
61 | - `filtertype` *str, optional* - 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method. 'Adaptative' performs an adaptive threshold as the weighted sum of 11 neighborhood pixels where the weights are a Gaussian window. Defaults to 'Adaptative'.
62 | - `thresh` *float, optional* - Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
63 | - `kernel_size` *int, optional* - Size of structuring element. Defaults to 5.
64 | - `target_name` *str, optional* - Target output name for the directogram. Defaults to None (which assumes that the input filename with the suffix "_dg" should be used).
65 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
66 |
67 | #### Returns
68 |
69 | - `MgFigure` - An MgFigure object referring to the internal figure and its data.
70 |
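A minimal usage sketch of [mg_impacts](#mg_impacts), using a hypothetical file name:

```python
import musicalgestures
from musicalgestures._impacts import mg_impacts

mv = musicalgestures.MgVideo('drummer.mp4')  # hypothetical percussive video

# Detect visual impacts: local maxima above the local mean of the impact envelope.
fig = mg_impacts(mv, detection=True, local_mean=0.1, local_maxima=0.15)
```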
--------------------------------------------------------------------------------
/docs/musicalgestures/_info.md:
--------------------------------------------------------------------------------
1 | # Info
2 |
3 | > Auto-generated documentation for [musicalgestures._info](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_info.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Info
6 | - [mg_info](#mg_info)
7 | - [plot_frames](#plot_frames)
8 |
9 | ## mg_info
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_info.py#L8)
12 |
13 | ```python
14 | def mg_info(self, type=None, autoshow=True, overwrite=False):
15 | ```
16 |
17 | Returns info about video/audio/format file using ffprobe.
18 |
19 | #### Arguments
20 |
21 | - `type` *str, optional* - Type of information to retrieve. Possible choices are 'audio', 'video', 'format' or 'frame'. Defaults to None (which gives info about video, audio and format).
22 | - `autoshow` *bool, optional* - Whether to show the I/P/B frames figure automatically. Defaults to True. NB: The type argument needs to be set to 'frame'.
23 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
24 |
25 | #### Returns
26 |
27 | - `list` - Decoded ffprobe output (stdout) as a list containing three dictionaries for video, audio and format metadata.
28 |
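A usage sketch (editorial; `dance.avi` is a placeholder), retrieving only the video stream metadata:

```python
from musicalgestures import MgVideo
from musicalgestures._info import mg_info

vid = MgVideo('dance.avi')
video_info = mg_info(vid, type='video')  # 'audio', 'format' or 'frame' also work
```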
29 | ## plot_frames
30 |
31 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_info.py#L114)
32 |
33 | ```python
34 | def plot_frames(
35 | df,
36 | label,
37 | color_list=['#636EFA', '#00CC96', '#EF553B'],
38 | index=0,
39 | ):
40 | ```
41 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_input_test.md:
--------------------------------------------------------------------------------
1 | # Input Test
2 |
3 | > Auto-generated documentation for [musicalgestures._input_test](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_input_test.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Input Test
6 | - [Error](#error)
7 | - [InputError](#inputerror)
8 | - [mg_input_test](#mg_input_test)
9 |
10 | ## Error
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_input_test.py#L3)
13 |
14 | ```python
15 | class Error(Exception):
16 | ```
17 |
18 | Base class for exceptions in this module.
19 |
20 | ## InputError
21 |
22 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_input_test.py#L8)
23 |
24 | ```python
25 | class InputError(Error):
26 | def __init__(message):
27 | ```
28 |
29 | Exception raised for errors in the input.
30 |
31 | #### Arguments
32 |
33 | - `Error` *str* - Explanation of the error.
34 |
35 | #### See also
36 |
37 | - [Error](#error)
38 |
39 | ## mg_input_test
40 |
41 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_input_test.py#L20)
42 |
43 | ```python
44 | def mg_input_test(
45 | filename,
46 | array,
47 | fps,
48 | filtertype,
49 | thresh,
50 | starttime,
51 | endtime,
52 | blur,
53 | skip,
54 | frames,
55 | ):
56 | ```
57 |
58 | Gives feedback to the user if initialization from the input went wrong.
59 |
60 | #### Arguments
61 |
62 | - `filename` *str* - Path to the input video file.
63 | - `array` *np.ndarray, optional* - Generates an MgVideo object from a video array. Defaults to None.
64 | - `fps` *float, optional* - The frequency at which consecutive images from the video array are captured or displayed. Defaults to None.
65 | - `filtertype` *str* - 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0 and above `thresh` to 1. 'Blob' removes individual pixels with the erosion method.
66 | - `thresh` *float* - A number in the range of 0 to 1. Eliminates pixel values less than given threshold.
67 | - `starttime` *int/float* - Trims the video from this start time (s).
68 | - `endtime` *int/float* - Trims the video until this end time (s).
69 | - `blur` *str* - 'Average' to apply a 10px * 10px blurring filter, 'None' otherwise.
70 | - `skip` *int* - Number of frames to discard between kept frames. `skip=0` keeps all frames, `skip=1` skips every other frame.
71 | - `frames` *int* - Specify a fixed target number of frames to extract from the video.
72 |
73 | #### Raises
74 |
75 | - `InputError` - If the types or options are wrong in the input.
76 |
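A hypothetical sketch of catching a failed initialization; the argument values mirror the MgVideo defaults documented elsewhere in these docs:

```python
from musicalgestures._input_test import InputError, mg_input_test

try:
    # filename, array, fps, filtertype, thresh, starttime, endtime, blur, skip, frames
    mg_input_test('dance.avi', None, None, 'Regular', 0.05, 0, 0, 'None', 0, 0)
except InputError as err:
    print('Invalid input:', err)
```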
--------------------------------------------------------------------------------
/docs/musicalgestures/_mglist.md:
--------------------------------------------------------------------------------
1 | # MgList
2 |
3 | > Auto-generated documentation for [musicalgestures._mglist](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / MgList
6 | - [MgList](#mglist)
7 | - [MgList().\_\_add\_\_](#mglist__add__)
8 | - [MgList().\_\_delitem\_\_](#mglist__delitem__)
9 | - [MgList().\_\_getitem\_\_](#mglist__getitem__)
10 | - [MgList().\_\_iadd\_\_](#mglist__iadd__)
11 | - [MgList().\_\_iter\_\_](#mglist__iter__)
12 | - [MgList().\_\_len\_\_](#mglist__len__)
13 | - [MgList().\_\_setitem\_\_](#mglist__setitem__)
14 | - [MgList().as_figure](#mglistas_figure)
15 | - [MgList().show](#mglistshow)
16 |
17 | ## MgList
18 |
19 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L5)
20 |
21 | ```python
22 | class MgList():
23 | def __init__(*objectlist):
24 | ```
25 |
26 | Class for handling lists of MgImage, MgFigure and MgList objects in the Musical Gestures Toolbox.
27 |
28 | #### Attributes
29 | 
30 | - `*objectlist` *objects and/or list(s) of objects*:
31 | 
32 |   MgObjects and/or MgImages to include in the list.
33 |
34 | ### MgList().\_\_add\_\_
35 |
36 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L132)
37 |
38 | ```python
39 | def __add__(other):
40 | ```
41 |
42 | Implements `+`.
43 |
44 | #### Arguments
45 |
46 | - `other` *MgImage/MgFigure/MgList* - The object(s) to add to the MgList.
47 |
48 | #### Returns
49 |
50 | - `MgList` - The incremented MgList.
51 |
52 | ### MgList().\_\_delitem\_\_
53 |
54 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L89)
55 |
56 | ```python
57 | def __delitem__(key):
58 | ```
59 |
60 | Implements deleting an element at a given index from the MgList.
61 |
62 | #### Arguments
63 |
64 | - `key` *int* - The index of the element to delete.
65 |
66 | ### MgList().\_\_getitem\_\_
67 |
68 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L67)
69 |
70 | ```python
71 | def __getitem__(key):
72 | ```
73 |
74 | Implements getting an element at a given index from the MgList.
75 |
76 | #### Arguments
77 |
78 | - `key` *int* - The index of the element to retrieve.
79 |
80 | #### Returns
81 |
82 | - `MgImage/MgFigure/MgList` - The element at `key`.
83 |
84 | ### MgList().\_\_iadd\_\_
85 |
86 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L107)
87 |
88 | ```python
89 | def __iadd__(other):
90 | ```
91 |
92 | Implements `+=`.
93 |
94 | #### Arguments
95 |
96 | - `other` *MgImage/MgFigure/MgList* - The object(s) to add to the MgList.
97 |
98 | #### Returns
99 |
100 | - `MgList` - The incremented MgList.
101 |
102 | ### MgList().\_\_iter\_\_
103 |
104 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L98)
105 |
106 | ```python
107 | def __iter__():
108 | ```
109 |
110 | Implements `iter()`.
111 |
112 | #### Returns
113 |
114 | - `iterator` - The iterator of `self.objectlist`.
115 |
116 | ### MgList().\_\_len\_\_
117 |
118 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L58)
119 |
120 | ```python
121 | def __len__():
122 | ```
123 |
124 | Implements `len()`.
125 |
126 | #### Returns
127 |
128 | - `int` - The length of the MgList.
129 |
130 | ### MgList().\_\_setitem\_\_
131 |
132 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L79)
133 |
134 | ```python
135 | def __setitem__(key, value):
136 | ```
137 |
138 | Implements setting an element at a given index in the MgList.
139 |
140 | #### Arguments
141 |
142 | - `key` *int* - The index of the element to change.
143 | - `value` *MgImage/MgFigure/MgList* - The element to place at `key`.
144 |
145 | ### MgList().as_figure
146 |
147 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L163)
148 |
149 | ```python
150 | def as_figure(dpi=300, autoshow=True, title=None, export_png=True):
151 | ```
152 |
153 | Creates a time-aligned figure from all the elements in the MgList.
154 |
155 | #### Arguments
156 |
157 | - `dpi` *int, optional* - Image quality of the rendered figure in DPI. Defaults to 300.
158 | - `autoshow` *bool, optional* - Whether to show the resulting figure automatically. Defaults to True.
159 | - `title` *str, optional* - Optionally add a title to the figure. Defaults to None (no title).
160 | - `export_png` *bool, optional* - Whether to export a png image of the resulting figure automatically. Defaults to True.
161 |
162 | #### Returns
163 |
164 | - `MgFigure` - The MgFigure with all the elements from the MgList as layers.
165 |
166 | ### MgList().show
167 |
168 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_mglist.py#L47)
169 |
170 | ```python
171 | def show(
172 | filename=None,
173 | key=None,
174 | mode='windowed',
175 | window_width=640,
176 | window_height=480,
177 | window_title=None,
178 | ):
179 | ```
180 |
181 | Iterates all objects in the MgList and calls `mg_show()` on them.
182 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_motionanalysis.md:
--------------------------------------------------------------------------------
1 | # Motionanalysis
2 |
3 | > Auto-generated documentation for [musicalgestures._motionanalysis](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionanalysis.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Motionanalysis
6 | - [area](#area)
7 | - [centroid](#centroid)
8 |
9 | ## area
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionanalysis.py#L42)
12 |
13 | ```python
14 | def area(motion_frame, height, width):
15 | ```
16 |
17 | ## centroid
18 |
19 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionanalysis.py#L5)
20 |
21 | ```python
22 | def centroid(image, width, height):
23 | ```
24 |
25 | Computes the centroid and quantity of motion in an image or frame.
26 |
27 | #### Arguments
28 |
29 | - `image` *np.array(uint8)* - The input image matrix for the centroid estimation function.
30 | - `width` *int* - The pixel width of the input video capture.
31 | - `height` *int* - The pixel height of the input video capture.
32 |
33 | #### Returns
34 |
35 | - `np.array(2)` - X and Y coordinates of the centroid of motion.
36 | - `int` - Quantity of motion: How large the change was in pixels.
37 |
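An illustrative call (editorial; assumes OpenCV is installed and `dance.avi` exists), feeding one frame-difference image to `centroid`:

```python
import cv2
from musicalgestures._motionanalysis import centroid

cap = cv2.VideoCapture('dance.avi')
_, prev_frame = cap.read()
_, curr_frame = cap.read()
cap.release()

# uint8 frame-difference image in grayscale
diff = cv2.absdiff(cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY),
                   cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY))
height, width = diff.shape
com, qom = centroid(diff, width, height)  # centroid and quantity of motion
```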
--------------------------------------------------------------------------------
/docs/musicalgestures/_motionvideo_mp_render.md:
--------------------------------------------------------------------------------
1 | # Motionvideo Mp Render
2 |
3 | > Auto-generated documentation for [musicalgestures._motionvideo_mp_render](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_render.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Motionvideo Mp Render
6 | - [bool_from_str](#bool_from_str)
7 | - [calc_frame_groups](#calc_frame_groups)
8 | - [mg_motion_mp](#mg_motion_mp)
9 | - [run_pool](#run_pool)
10 |
11 | ## bool_from_str
12 |
13 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_render.py#L201)
14 |
15 | ```python
16 | def bool_from_str(boolstring):
17 | ```
18 |
19 | ## calc_frame_groups
20 |
21 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_render.py#L187)
22 |
23 | ```python
24 | def calc_frame_groups(framecount, num_cores):
25 | ```
26 |
27 | ## mg_motion_mp
28 |
29 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_render.py#L14)
30 |
31 | ```python
32 | def mg_motion_mp(args):
33 | ```
34 |
35 | ## run_pool
36 |
37 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_render.py#L182)
38 |
39 | ```python
40 | def run_pool(func, args, numprocesses):
41 | ```
42 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_motionvideo_mp_run.md:
--------------------------------------------------------------------------------
1 | # Motionvideo Mp Run
2 |
3 | > Auto-generated documentation for [musicalgestures._motionvideo_mp_run](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Motionvideo Mp Run
6 | - [TrackMultiProgress](#trackmultiprogress)
7 | - [TrackMultiProgress().progress](#trackmultiprogressprogress)
8 | - [TrackMultiProgress().reset](#trackmultiprogressreset)
9 | - [concat_videos](#concat_videos)
10 | - [mg_motion_mp](#mg_motion_mp)
11 | - [run_socket_server](#run_socket_server)
12 |
13 | ## TrackMultiProgress
14 |
15 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py#L328)
16 |
17 | ```python
18 | class TrackMultiProgress():
19 | def __init__(numprocesses):
20 | ```
21 |
22 | ### TrackMultiProgress().progress
23 |
24 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py#L333)
25 |
26 | ```python
27 | def progress(node, iteration):
28 | ```
29 |
30 | ### TrackMultiProgress().reset
31 |
32 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py#L337)
33 |
34 | ```python
35 | def reset():
36 | ```
37 |
38 | ## concat_videos
39 |
40 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py#L341)
41 |
42 | ```python
43 | def concat_videos(
44 | list_of_videos,
45 | target_name=None,
46 | overwrite=False,
47 | pb_prefix='Concatenating videos:',
48 | stream=True,
49 | ):
50 | ```
51 |
52 | ## mg_motion_mp
53 |
54 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py#L16)
55 |
56 | ```python
57 | def mg_motion_mp(
58 | self,
59 | filtertype='Regular',
60 | thresh=0.05,
61 | blur='None',
62 | kernel_size=5,
63 | inverted_motionvideo=False,
64 | inverted_motiongram=False,
65 | unit='seconds',
66 | equalize_motiongram=True,
67 | save_plot=True,
68 | plot_title=None,
69 | save_data=True,
70 | data_format='csv',
71 | save_motiongrams=True,
72 | save_video=True,
73 | target_name_video=None,
74 | target_name_plot=None,
75 | target_name_data=None,
76 | target_name_mgx=None,
77 | target_name_mgy=None,
78 | overwrite=False,
79 | num_processes=-1,
80 | ):
81 | ```
82 |
83 | ## run_socket_server
84 |
85 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_motionvideo_mp_run.py#L295)
86 |
87 | ```python
88 | def run_socket_server(host, port, pb, numprocesses):
89 | ```
90 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_pose.md:
--------------------------------------------------------------------------------
1 | # Pose
2 |
3 | > Auto-generated documentation for [musicalgestures._pose](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_pose.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Pose
6 | - [download_model](#download_model)
7 | - [pose](#pose)
8 |
9 | ## download_model
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_pose.py#L350)
12 |
13 | ```python
14 | def download_model(modeltype):
15 | ```
16 |
17 | Helper function to automatically download model (.caffemodel) files.
18 |
19 | ## pose
20 |
21 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_pose.py#L13)
22 |
23 | ```python
24 | def pose(
25 | self,
26 | model='body_25',
27 | device='gpu',
28 | threshold=0.1,
29 | downsampling_factor=2,
30 | save_data=True,
31 | data_format='csv',
32 | save_video=True,
33 | target_name_video=None,
34 | target_name_data=None,
35 | overwrite=False,
36 | ):
37 | ```
38 |
39 | Renders a video with the pose estimation (a.k.a. "keypoint detection" or "skeleton tracking") overlaid on it.
40 | Outputs the predictions in a text file containing the normalized x and y coordinates of each keypoint
41 | (default format is csv). Uses models from the [openpose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) project.
42 |
43 | #### Arguments
44 |
45 | - `model` *str, optional* - 'body_25' loads the model trained on the BODY_25 dataset, 'mpi' loads the model trained on the Multi-Person Dataset (MPII), 'coco' loads one trained on the COCO dataset. The BODY_25 model outputs 25 points, the MPII model outputs 15 points, while the COCO model produces 18 points. Defaults to 'body_25'.
46 | - `device` *str, optional* - Sets the backend to use for the neural network ('cpu' or 'gpu'). Defaults to 'gpu'.
47 | - `threshold` *float, optional* - The normalized confidence threshold that decides whether we keep or discard a predicted point. Discarded points get substituted with (0, 0) in the output data. Defaults to 0.1.
48 | - `downsampling_factor` *int, optional* - Decides how much we downsample the video before we pass it to the neural network. For example `downsampling_factor=4` means that the input to the network is one-fourth the resolution of the source video. Heavier downsampling reduces rendering time but produces lower quality pose estimation. Defaults to 2.
49 | - `save_data` *bool, optional* - Whether we save the predicted pose data to a file. Defaults to True.
50 | - `data_format` *str, optional* - Specifies the format of the pose data. Accepted values are 'csv', 'tsv' and 'txt'. For multiple output formats, use a list, e.g. ['csv', 'txt']. Defaults to 'csv'.
51 | - `save_video` *bool, optional* - Whether we save the video with the estimated pose overlaid on it. Defaults to True.
52 | - `target_name_video` *str, optional* - Target output name for the video. Defaults to None (which assumes that the input filename with the suffix "_pose" should be used).
53 | - `target_name_data` *str, optional* - Target output name for the data. Defaults to None (which assumes that the input filename with the suffix "_pose" should be used).
54 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
55 |
56 | #### Returns
57 |
58 | - `MgVideo` - An MgVideo pointing to the output video.
59 |
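A usage sketch (editorial; assumes the chosen .caffemodel has already been fetched with the download scripts in `musicalgestures/pose`, and that `dance.avi` exists):

```python
from musicalgestures import MgVideo
from musicalgestures._pose import pose

vid = MgVideo('dance.avi')
posed = pose(vid, model='body_25', device='cpu', downsampling_factor=2)
```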
--------------------------------------------------------------------------------
/docs/musicalgestures/_show.md:
--------------------------------------------------------------------------------
1 | # Show
2 |
3 | > Auto-generated documentation for [musicalgestures._show](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_show.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Show
6 | - [mg_show](#mg_show)
7 | - [show_in_new_process](#show_in_new_process)
8 |
9 | ## mg_show
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_show.py#L14)
12 |
13 | ```python
14 | def mg_show(
15 | self,
16 | filename=None,
17 | key=None,
18 | mode='windowed',
19 | window_width=640,
20 | window_height=480,
21 | window_title=None,
22 | **ipython_kwargs,
23 | ):
24 | ```
25 |
26 | General method to show an image or video file either in a window, or inline in a jupyter notebook.
27 |
28 | #### Arguments
29 |
30 | - `filename` *str, optional* - If given, [mg_show](#mg_show) will show this file instead of what it inherits from its parent object. Defaults to None.
31 | - `key` *str, optional* - If given, [mg_show](#mg_show) will search for file names corresponding to certain processes you have previously rendered on your source. It is meant to be a shortcut, so you don't have to remember the exact name (and path) of e.g. a motion video corresponding to your source; you can rather just use `MgVideo('path/to/vid.mp4').show(key='motion')`. Accepted values are 'mgx', 'mgy', 'vgx', 'vgy', 'blend', 'plot', 'motion', 'history', 'motionhistory', 'sparse', and 'dense'. Defaults to None.
32 | - `mode` *str, optional* - Whether to show things in a separate window or inline in the jupyter notebook. Accepted values are 'windowed' and 'notebook'. Defaults to 'windowed'.
33 | - `window_width` *int, optional* - The width of the window. Defaults to 640.
34 | - `window_height` *int, optional* - The height of the window. Defaults to 480.
35 | - `window_title` *str, optional* - The title of the window. If None, the title of the window will be the file name. Defaults to None.
36 | - `ipython_kwargs` *dict, optional* - Additional arguments for IPython.display.Image or IPython.display.Video. Defaults to None.
37 |
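Building on the `key` shortcut described above, a minimal sketch showing a previously rendered motion video inline in a notebook:

```python
from musicalgestures import MgVideo

MgVideo('path/to/vid.mp4').show(key='motion', mode='notebook')
```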
38 | ## show_in_new_process
39 |
40 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_show.py#L301)
41 |
42 | ```python
43 | def show_in_new_process(cmd):
44 | ```
45 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_show_window.md:
--------------------------------------------------------------------------------
1 | # Show Window
2 |
3 | > Auto-generated documentation for [musicalgestures._show_window](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_show_window.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Show Window
6 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_ssm.md:
--------------------------------------------------------------------------------
1 | # Ssm
2 |
3 | > Auto-generated documentation for [musicalgestures._ssm](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_ssm.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Ssm
6 | - [mg_ssm](#mg_ssm)
7 | - [slow_dot](#slow_dot)
8 | - [smooth_downsample_feature_sequence](#smooth_downsample_feature_sequence)
9 |
10 | ## mg_ssm
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_ssm.py#L67)
13 |
14 | ```python
15 | def mg_ssm(
16 | self,
17 | features='motiongrams',
18 | filtertype='Regular',
19 | thresh=0.05,
20 | blur='None',
21 | norm=np.inf,
22 | threshold=0.001,
23 | cmap='gray_r',
24 | use_median=False,
25 | kernel_size=5,
26 | invert_yaxis=True,
27 | title=None,
28 | target_name=None,
29 | overwrite=False,
30 | ):
31 | ```
32 |
33 | Computes a Self-Similarity Matrix (SSM) by converting the input signal into a suitable feature sequence and comparing each element of the feature sequence with all other elements of the sequence.
34 | SSMs can be computed over different input features such as 'motiongrams', 'spectrogram', 'chromagram' and 'tempogram'.
35 |
36 | #### Arguments
37 |
38 | - `features` *str, optional* - Defines the type of features on which to compute SSM. Possible to compute SSM on 'motiongrams', 'videograms', 'spectrogram', 'chromagram' and 'tempogram'. Defaults to 'motiongrams'.
39 | - `filtertype` *str, optional* - 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method. Defaults to 'Regular'.
40 | - `thresh` *float, optional* - Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
41 | - `blur` *str, optional* - 'Average' to apply a 10px * 10px blurring filter, 'None' otherwise. Defaults to 'None'.
42 | - `norm` *int, optional* - Normalizes the columns of the feature sequence. Possible options include the Manhattan norm (1), Euclidean norm (2), minimum norm (-np.inf) and maximum norm (np.inf). Defaults to np.inf.
43 | - `threshold` *float, optional* - Only columns with a norm of at least `threshold` are normalized. Defaults to 0.001.
44 | - `cmap` *str, optional* - A Colormap instance or registered colormap name. The colormap maps the C values to colors. Defaults to 'gray_r'.
45 | - `use_median` *bool, optional* - If True the algorithm applies a median filter on the thresholded frame-difference stream. Defaults to False.
46 | - `kernel_size` *int, optional* - Size of the median filter (if `use_median=True`) or the erosion filter (if `filtertype='blob'`). Defaults to 5.
47 | - `invert_yaxis` *bool, optional* - Whether to invert the y axis of the SSM. Defaults to True.
48 | - `title` *str, optional* - Optionally add title to the figure. Possible to set the filename as the title using the string 'filename'. Defaults to None.
49 | - `target_name` *str, optional* - Target output name for the SSM. Defaults to None.
50 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
51 |
52 | #### Returns
53 |
54 | If `features='motiongrams'`:
55 | - `MgList` - An MgList pointing to the output SSM images (as MgImages).
56 | Otherwise:
57 | - `MgImage` - An MgImage pointing to the output SSM.
58 |
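A sketch (placeholder file name): with `features='motiongrams'` the call returns an MgList of SSM images, per the Returns section above.

```python
from musicalgestures import MgVideo
from musicalgestures._ssm import mg_ssm

vid = MgVideo('dance.avi')
ssms = mg_ssm(vid, features='motiongrams', cmap='gray_r')
```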
59 | ## slow_dot
60 |
61 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_ssm.py#L48)
62 |
63 | ```python
64 | def slow_dot(X, Y, length):
65 | ```
66 |
67 | Low-memory implementation of the dot product.
68 |
69 | ## smooth_downsample_feature_sequence
70 |
71 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_ssm.py#L19)
72 |
73 | ```python
74 | def smooth_downsample_feature_sequence(
75 | X,
76 | sr,
77 | filt_len=41,
78 | down_sampling=10,
79 | w_type='boxcar',
80 | ):
81 | ```
82 |
83 | Smoothes and downsamples a feature sequence. Smoothing is achieved by convolution with a filter kernel.
84 |
85 | #### Arguments
86 |
87 | - `X` *np.ndarray* - Feature sequence.
88 | - `sr` *int* - Sampling rate.
89 | - `filt_len` *int, optional* - Length of smoothing filter. Defaults to 41.
90 | - `down_sampling` *int, optional* - Downsampling factor. Defaults to 10.
91 | - `w_type` *str, optional* - Window type of smoothing filter. Defaults to 'boxcar'.
92 |
93 | #### Returns
94 |
95 | - `X_smooth` *np.ndarray* - Smoothed and downsampled feature sequence.
96 | - `sr_feature` *scalar* - Sampling rate of `X_smooth`.
97 |
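A worked sketch on synthetic data (editorial): a 12-dimensional feature sequence at 100 Hz, smoothed with a 41-tap boxcar window and downsampled by a factor of 10.

```python
import numpy as np
from musicalgestures._ssm import smooth_downsample_feature_sequence

X = np.random.rand(12, 1000)  # (features, frames)
X_smooth, sr_feature = smooth_downsample_feature_sequence(
    X, sr=100, filt_len=41, down_sampling=10, w_type='boxcar')
print(X_smooth.shape, sr_feature)  # roughly (12, 100) and 10.0
```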
--------------------------------------------------------------------------------
/docs/musicalgestures/_subtract.md:
--------------------------------------------------------------------------------
1 | # Subtract
2 |
3 | > Auto-generated documentation for [musicalgestures._subtract](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_subtract.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Subtract
6 | - [mg_subtract](#mg_subtract)
7 |
8 | ## mg_subtract
9 |
10 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_subtract.py#L8)
11 |
12 | ```python
13 | def mg_subtract(
14 | self,
15 | color=True,
16 | filtertype=None,
17 | threshold=0.05,
18 | blur=False,
19 | curves=0.15,
20 | use_median=False,
21 | kernel_size=5,
22 | bg_img=None,
23 | bg_color='#000000',
24 | target_name=None,
25 | overwrite=False,
26 | ):
27 | ```
28 |
29 | Renders background subtraction using ffmpeg.
30 |
31 | #### Arguments
32 |
33 | - `color` *bool, optional* - If False the input is converted to grayscale at the start of the process. This can significantly reduce render time. Defaults to True.
34 | - `filtertype` *str, optional* - 'Regular' turns all values below `threshold` to 0. 'Binary' turns all values below `threshold` to 0 and above `threshold` to 1. 'Blob' removes individual pixels with the erosion method. Defaults to None.
35 | - `threshold` *float, optional* - Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
36 | - `blur` *bool, optional* - Whether to apply a smartblur ffmpeg filter or not. Defaults to False.
37 | - `curves` *float, optional* - Applies a curves and equalisation threshold filter to subtract the background. Ranges from 0 to 1. Defaults to 0.15.
38 | - `use_median` *bool, optional* - If True the algorithm applies a median filter on the thresholded frame-difference stream. Defaults to False.
39 | - `kernel_size` *int, optional* - Size of the median filter (if `use_median=True`) or the erosion filter (if `filtertype='blob'`). Defaults to 5.
40 | - `bg_img` *str, optional* - Path to a background image (.png) that needs to be subtracted from the video. If set to None, it uses an average image of all frames in the video. Defaults to None.
41 | - `bg_color` *str, optional* - Set the background color in the video file in hex value. Defaults to '#000000' (black).
42 | - `target_name` *str, optional* - Target output name for the motiongram. Defaults to None.
43 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
44 |
45 | #### Returns
46 |
47 | - `MgVideo` - An MgVideo referring to the background-subtracted version of the parent MgVideo.
48 |
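A sketch (placeholder file name): grayscale background subtraction against the average image of all frames (`bg_img=None`).

```python
from musicalgestures import MgVideo
from musicalgestures._subtract import mg_subtract

vid = MgVideo('dance.avi')
subtracted = mg_subtract(vid, color=False, bg_img=None, bg_color='#000000')
```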
--------------------------------------------------------------------------------
/docs/musicalgestures/_video.md:
--------------------------------------------------------------------------------
1 | # Video
2 |
3 | > Auto-generated documentation for [musicalgestures._video](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Video
6 | - [MgVideo](#mgvideo)
7 | - [MgVideo().extract_frame](#mgvideoextract_frame)
8 | - [MgVideo().from_numpy](#mgvideofrom_numpy)
9 | - [MgVideo().get_video](#mgvideoget_video)
10 | - [MgVideo().numpy](#mgvideonumpy)
11 | - [MgVideo().test_input](#mgvideotest_input)
12 |
13 | ## MgVideo
14 |
15 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py#L19)
16 |
17 | ```python
18 | class MgVideo(MgAudio):
19 | def __init__(
20 | filename: str | list[str],
21 | array=None,
22 | fps=None,
23 | path=None,
24 | filtertype='Regular',
25 | thresh=0.05,
26 | starttime=0,
27 | endtime=0,
28 | blur='None',
29 | skip=0,
30 | frames=0,
31 | rotate=0,
32 | color=True,
33 | contrast=0,
34 | brightness=0,
35 | crop='None',
36 | keep_all=False,
37 | returned_by_process=False,
38 | sr=22050,
39 | n_fft=2048,
40 | hop_length=512,
41 | ):
42 | ```
43 |
44 | This is the class for working with video files in the Musical Gestures Toolbox. It inherits from the MgAudio class, so it can work with audio files as well.
45 | There is a set of preprocessing tools you can use when you load a video, such as:
46 | - trimming: to extract a section of the video,
47 | - skipping: to shrink the video by skipping N frames after keeping one,
48 | - rotating: to rotate the video by N degrees,
49 | - applying brightness and contrast adjustments,
50 | - cropping: to crop the video either automatically (by assessing the area of motion) or manually with a pop-up user interface,
51 | - converting to grayscale.
52 | 
53 | These preprocessing steps are applied when the MgVideo is created. Further processes are available as class methods.
54 |
55 | #### See also
56 |
57 | - [MgAudio](_audio.md#mgaudio)
58 |
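For example, the construction used in `examples/test_pianist.py` loads a video in grayscale, auto-crops it to the area of motion, and keeps only every fourth frame:

```python
import musicalgestures

mg = musicalgestures.MgVideo('pianist.avi', color=False, crop='auto', skip=3)
```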
59 | ### MgVideo().extract_frame
60 |
61 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py#L307)
62 |
63 | ```python
64 | def extract_frame(**kwargs):
65 | ```
66 |
67 | Extracts a frame from the video at a given time.
68 | see _utils.extract_frame for details.
69 |
70 | #### Arguments
71 |
72 | - `frame` *int* - The frame number to extract.
73 | - `time` *str* - The time in HH:MM:ss.ms where to extract the frame from.
74 | - `target_name` *str, optional* - The name for the output file. If None, the name is derived from the input filename plus a frame-number suffix. Defaults to None.
75 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
76 |
77 | #### Returns
78 |
79 | - `MgImage` - An MgImage object referring to the extracted frame.
80 |
81 | ### MgVideo().from_numpy
82 |
83 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py#L267)
84 |
85 | ```python
86 | def from_numpy(array, fps, target_name=None):
87 | ```
88 |
89 | ### MgVideo().get_video
90 |
91 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py#L171)
92 |
93 | ```python
94 | def get_video():
95 | ```
96 |
97 | Creates a video attribute on the Musical Gestures object with the correct settings.
98 |
99 | ### MgVideo().numpy
100 |
101 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py#L254)
102 |
103 | ```python
104 | def numpy():
105 | ```
106 |
107 | Pipes all video frames from FFmpeg to a numpy array.
108 |
109 | ### MgVideo().test_input
110 |
111 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_video.py#L156)
112 |
113 | ```python
114 | def test_input():
115 | ```
116 |
117 | Gives feedback to the user if initialization from the input went wrong.
118 |
--------------------------------------------------------------------------------
/docs/musicalgestures/_videoadjust.md:
--------------------------------------------------------------------------------
1 | # Videoadjust
2 |
3 | > Auto-generated documentation for [musicalgestures._videoadjust](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoadjust.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Videoadjust
6 | - [contrast_brightness_ffmpeg](#contrast_brightness_ffmpeg)
7 | - [fixed_frames_ffmpeg](#fixed_frames_ffmpeg)
8 | - [skip_frames_ffmpeg](#skip_frames_ffmpeg)
9 |
10 | ## contrast_brightness_ffmpeg
11 |
12 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoadjust.py#L8)
13 |
14 | ```python
15 | def contrast_brightness_ffmpeg(
16 | filename,
17 | contrast=0,
18 | brightness=0,
19 | target_name=None,
20 | overwrite=False,
21 | ):
22 | ```
23 |
24 | Applies contrast and brightness adjustments on the source video using ffmpeg.
25 |
26 | #### Arguments
27 |
28 | - `filename` *str* - Path to the video to process.
29 | - `contrast` *int/float, optional* - Increase or decrease contrast. Values range from -100 to 100. Defaults to 0.
30 | - `brightness` *int/float, optional* - Increase or decrease brightness. Values range from -100 to 100. Defaults to 0.
31 | - `target_name` *str, optional* - Defaults to None (which assumes that the input filename with the suffix "_cb" should be used).
32 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
33 |
34 | #### Returns
35 |
36 | - `str` - Path to the output video.
37 |
38 | ## fixed_frames_ffmpeg
39 |
40 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoadjust.py#L100)
41 |
42 | ```python
43 | def fixed_frames_ffmpeg(
44 | filename,
45 | frames=0,
46 | target_name=None,
47 | overwrite=False,
48 | ):
49 | ```
50 |
51 | Specifies a fixed target number of frames to extract from the video.
52 | To extract only the keyframes of the video, set `frames=-1`.
53 |
54 | #### Arguments
55 |
56 | - `filename` *str* - Path to the video to process.
57 | - `frames` *int, optional* - Number of frames to extract from the video. If set to -1, it will only extract the keyframes of the video. Defaults to 0.
58 | - `target_name` *str, optional* - Defaults to None (which assumes that the input filename with the suffix "_fixed" should be used).
59 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
60 |
61 | #### Returns
62 |
63 | - `str` - Path to the output video.
64 |
65 | ## skip_frames_ffmpeg
66 |
67 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoadjust.py#L60)
68 |
69 | ```python
70 | def skip_frames_ffmpeg(filename, skip=0, target_name=None, overwrite=False):
71 | ```
72 |
73 | Time-shrinks the video by skipping (discarding) `skip` frames after every kept frame.
74 | To discard half of the frames (i.e. double the speed of the video) use `skip=1`.
75 |
76 | #### Arguments
77 |
78 | - `filename` *str* - Path to the video to process.
79 | - `skip` *int, optional* - Discard `skip` frames before keeping one. Defaults to 0.
80 | - `target_name` *str, optional* - Defaults to None (which assumes that the input filename with the suffix "_skip" should be used).
81 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
82 |
83 | #### Returns
84 |
85 | - `str` - Path to the output video.
86 |
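A chained sketch (placeholder file name): brighten the video, then double its speed by skipping every other frame. Each helper returns the path to its output file.

```python
from musicalgestures._videoadjust import (contrast_brightness_ffmpeg,
                                          skip_frames_ffmpeg)

brighter = contrast_brightness_ffmpeg('dance.avi', contrast=20, brightness=10)
faster = skip_frames_ffmpeg(brighter, skip=1)
```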
--------------------------------------------------------------------------------
/docs/musicalgestures/_videograms.md:
--------------------------------------------------------------------------------
1 | # Videograms
2 |
3 | > Auto-generated documentation for [musicalgestures._videograms](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videograms.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Videograms
6 | - [videograms_ffmpeg](#videograms_ffmpeg)
7 |
8 | ## videograms_ffmpeg
9 |
10 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videograms.py#L10)
11 |
12 | ```python
13 | def videograms_ffmpeg(
14 | self,
15 | target_name_x=None,
16 | target_name_y=None,
17 | overwrite=False,
18 | ):
19 | ```
20 |
21 | Renders horizontal and vertical videograms of the source video using ffmpeg. Averages video frames by axes,
22 | and creates two images of the horizontal-axis and vertical-axis stacks. In these stacks, a single row or
23 | column corresponds to a frame from the source video, and the index of the row or column corresponds to
24 | the index of the source frame.
25 |
26 | #### Arguments
27 |
28 | - `target_name_x` *str, optional* - Target output name for the videogram on the X axis. Defaults to None (which assumes that the input filename with the suffix "_vgx" should be used).
29 | - `target_name_y` *str, optional* - Target output name for the videogram on the Y axis. Defaults to None (which assumes that the input filename with the suffix "_vgy" should be used).
30 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
31 |
32 | #### Returns
33 |
34 | - `MgList` - An MgList with the MgImage objects referring to the horizontal and vertical videograms respectively.
35 |
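A usage sketch (placeholder file name); the horizontal and vertical videograms can be retrieved from the returned MgList by index:

```python
from musicalgestures import MgVideo
from musicalgestures._videograms import videograms_ffmpeg

vid = MgVideo('dance.avi')
vgs = videograms_ffmpeg(vid)
vgx, vgy = vgs[0], vgs[1]  # MgImage objects
```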
--------------------------------------------------------------------------------
/docs/musicalgestures/_videoreader.md:
--------------------------------------------------------------------------------
1 | # Videoreader
2 |
3 | > Auto-generated documentation for [musicalgestures._videoreader](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoreader.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Videoreader
6 | - [ReadError](#readerror)
7 | - [mg_videoreader](#mg_videoreader)
8 |
9 | ## ReadError
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoreader.py#L9)
12 |
13 | ```python
14 | class ReadError(Exception):
15 | ```
16 |
17 | Base class for file read errors.
18 |
19 | ## mg_videoreader
20 |
21 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_videoreader.py#L14)
22 |
23 | ```python
24 | def mg_videoreader(
25 | filename,
26 | starttime=0,
27 | endtime=0,
28 | skip=0,
29 | frames=0,
30 | rotate=0,
31 | contrast=0,
32 | brightness=0,
33 | crop='None',
34 | color=True,
35 | keep_all=False,
36 | returned_by_process=False,
37 | ):
38 | ```
39 |
40 | Reads in a video file and optionally applies several different processes to it. These include:
41 | - trimming,
42 | - skipping,
43 | - fixing,
44 | - rotating,
45 | - applying brightness and contrast,
46 | - cropping,
47 | - converting to grayscale.
48 |
49 | #### Arguments
50 |
51 | - `filename` *str* - Path to the input video file.
52 | - `starttime` *int/float, optional* - Trims the video from this start time (s). Defaults to 0.
53 | - `endtime` *int/float, optional* - Trims the video until this end time (s). Defaults to 0 (which will make the algorithm use the full length of the input video instead).
54 | - `skip` *int, optional* - Time-shrinks the video by skipping (discarding) every n frames determined by `skip`. Defaults to 0.
55 | - `frames` *int, optional* - Specify a fixed target number of frames to extract from the video. Defaults to 0.
56 | - `rotate` *int/float, optional* - Rotates the video by `rotate` degrees. Defaults to 0.
57 | - `contrast` *int/float, optional* - Applies +/- 100 contrast to video. Defaults to 0.
58 | - `brightness` *int/float, optional* - Applies +/- 100 brightness to video. Defaults to 0.
59 | - `crop` *str, optional* - If 'manual', opens a window displaying the first frame of the input video file, where the user can draw a rectangle to which cropping is applied. If 'auto' the cropping function attempts to determine the area of significant motion and applies the cropping to that area. Defaults to 'None'.
60 | - `color` *bool, optional* - If False, converts the video to grayscale and sets every method in grayscale mode. Defaults to True.
61 | - `keep_all` *bool, optional* - If True, preserves an output video file after each used preprocessing stage. Defaults to False.
62 | - `returned_by_process` *bool, optional* - This parameter is only for internal use, do not use it. Defaults to False.
63 |
64 | #### Returns
65 |
66 | - `int` - The number of frames in the output video file.
67 | - `int` - The pixel width of the output video file.
68 | - `int` - The pixel height of the output video file.
69 | - `int` - The FPS (frames per second) of the output video file.
70 | - `float` - The length of the output video file in seconds.
71 | - `str` - The path to the output video file without its extension. The file name gets a suffix for each used process.
72 | - `str` - The file extension of the output video file.
73 | - `bool` - Whether the video has an audio track.
74 |
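A sketch unpacking the documented return values in the order listed above (placeholder input file):

```python
from musicalgestures._videoreader import mg_videoreader

(n_frames, width, height, fps, length,
 out_base, ext, has_audio) = mg_videoreader('dance.avi', skip=1)
```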
--------------------------------------------------------------------------------
/docs/musicalgestures/_warp.md:
--------------------------------------------------------------------------------
1 | # Warp
2 |
3 | > Auto-generated documentation for [musicalgestures._warp](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_warp.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / [Musicalgestures](index.md#musicalgestures) / Warp
6 | - [beats_diff](#beats_diff)
7 | - [mg_warp_audiovisual_beats](#mg_warp_audiovisual_beats)
8 |
9 | ## beats_diff
10 |
11 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_warp.py#L14)
12 |
13 | ```python
14 | @jit(nopython=True)
15 | def beats_diff(beats, media):
16 | ```
17 |
18 | ## mg_warp_audiovisual_beats
19 |
20 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/_warp.py#L21)
21 |
22 | ```python
23 | def mg_warp_audiovisual_beats(
24 | self,
25 | audio_file,
26 | speed=(0.5, 2),
27 | data=None,
28 | filtertype='Adaptative',
29 | thresh=0.05,
30 | kernel_size=5,
31 | target_name=None,
32 | overwrite=False,
33 | ):
34 | ```
35 |
36 | Warps audio beats with visual beats (patterns of motion that can be shifted in time to control visual rhythm).
37 | Visual beats are warped after computing a directogram, which factors the magnitude of motion in the video into different angles.
38 |
39 | Source: Abe Davis -- [Visual Rhythm and Beat](http://www.abedavis.com/files/papers/VisualRhythm_Davis18.pdf) (section 5)
40 |
41 | #### Arguments
42 |
43 | - `audio_file` *str* - Path to the audio file.
44 | - `speed` *tuple, optional* - Range of speed change between the audiovisual beats, which can be adjusted to slow down or speed up the visual rhythms. Defaults to (0.5, 2).
45 | - `data` *array_like, optional* - Computed directogram data can be added separately to avoid the directogram processing time (which can be quite long). Defaults to None.
46 | - `filtertype` *str, optional* - 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0 and above `thresh` to 1. 'Blob' removes individual pixels with the erosion method. 'Adaptative' performs adaptive thresholding as the weighted sum of 11 neighborhood pixels where the weights are a Gaussian window. Defaults to 'Adaptative'.
47 | - `thresh` *float, optional* - Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
48 | - `kernel_size` *int, optional* - Size of structuring element. Defaults to 5.
49 | - `target_name` *str, optional* - Target output name for the directogram. Defaults to None (which assumes that the input filename with the suffix "_dg" should be used).
50 | - `overwrite` *bool, optional* - Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
51 |
52 | #### Returns
53 |
54 | - `MgVideo` - An MgVideo referring to the audiovisual-beat-warped version of the parent MgVideo.
55 |
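A usage sketch (placeholder file names): warping the video's visual beats to the beats of an external audio track.

```python
from musicalgestures import MgVideo
from musicalgestures._warp import mg_warp_audiovisual_beats

vid = MgVideo('dance.avi')
warped = mg_warp_audiovisual_beats(vid, 'music.wav', speed=(0.5, 2))
```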
--------------------------------------------------------------------------------
/docs/musicalgestures/examples/index.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | > Auto-generated documentation for [musicalgestures.examples](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/examples/__init__.py) module.
4 |
5 | - [Mgt-python](../../README.md#mgt-python) / [Modules](../../MODULES.md#mgt-python-modules) / [Musicalgestures](../index.md#musicalgestures) / Examples
6 | - Modules
7 | - [Test Dance](test_dance.md#test-dance)
8 | - [Test Pianist](test_pianist.md#test-pianist)
9 |
--------------------------------------------------------------------------------
/docs/musicalgestures/examples/test_dance.md:
--------------------------------------------------------------------------------
1 | # Test Dance
2 |
3 | > Auto-generated documentation for [musicalgestures.examples.test_dance](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/examples/test_dance.py) module.
4 |
5 | - [Mgt-python](../../README.md#mgt-python) / [Modules](../../MODULES.md#mgt-python-modules) / [Musicalgestures](../index.md#musicalgestures) / [Examples](index.md#examples) / Test Dance
6 |
7 | #### Attributes
8 |
9 | - `mg` - CREATE MODULE OBJECT: Here is an example call to create an MgVideo, using loads of parameters: `musicalgestures.MgVideo('../dance.avi', startti...`
10 |
--------------------------------------------------------------------------------
/docs/musicalgestures/examples/test_pianist.md:
--------------------------------------------------------------------------------
1 | # Test Pianist
2 |
3 | > Auto-generated documentation for [musicalgestures.examples.test_pianist](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/examples/test_pianist.py) module.
4 |
5 | - [Mgt-python](../../README.md#mgt-python) / [Modules](../../MODULES.md#mgt-python-modules) / [Musicalgestures](../index.md#musicalgestures) / [Examples](index.md#examples) / Test Pianist
6 |
7 | #### Attributes
8 |
9 | - `mg` - CREATE MODULE OBJECT: Here is an example call to create an MgVideo, using loads of parameters: `musicalgestures.MgVideo('pianist.avi', color=False, crop='auto', skip=3)`
10 |
--------------------------------------------------------------------------------
/docs/musicalgestures/index.md:
--------------------------------------------------------------------------------
1 | # Musicalgestures
2 |
3 | > Auto-generated documentation for [musicalgestures](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/__init__.py) module.
4 |
5 | - [Mgt-python](../README.md#mgt-python) / [Modules](../MODULES.md#mgt-python-modules) / Musicalgestures
6 | - [Examples](#examples)
7 | - Modules
8 | - [360video](_360video.md#360video)
9 | - [Audio](_audio.md#audio)
10 | - [Blend](_blend.md#blend)
11 | - [Blurfaces](_blurfaces.md#blurfaces)
12 | - [CenterFace](_centerface.md#centerface)
13 | - [Colored](_colored.md#colored)
14 | - [Cropping Window](_cropping_window.md#cropping-window)
15 | - [Cropvideo](_cropvideo.md#cropvideo)
16 | - [Directograms](_directograms.md#directograms)
17 | - [Filter](_filter.md#filter)
18 | - [Flow](_flow.md#flow)
19 | - [Grid](_grid.md#grid)
20 | - [History](_history.md#history)
21 | - [Impacts](_impacts.md#impacts)
22 | - [Info](_info.md#info)
23 | - [Input Test](_input_test.md#input-test)
24 | - [MgList](_mglist.md#mglist)
25 | - [Motionanalysis](_motionanalysis.md#motionanalysis)
26 | - [Motionvideo](_motionvideo.md#motionvideo)
27 | - [Motionvideo Mp Render](_motionvideo_mp_render.md#motionvideo-mp-render)
28 | - [Motionvideo Mp Run](_motionvideo_mp_run.md#motionvideo-mp-run)
29 | - [Pose](_pose.md#pose)
30 | - [Show](_show.md#show)
31 | - [Show Window](_show_window.md#show-window)
32 | - [Ssm](_ssm.md#ssm)
33 | - [Subtract](_subtract.md#subtract)
34 | - [Utils](_utils.md#utils)
35 | - [Video](_video.md#video)
36 | - [Videoadjust](_videoadjust.md#videoadjust)
37 | - [Videograms](_videograms.md#videograms)
38 | - [Videoreader](_videoreader.md#videoreader)
39 | - [Warp](_warp.md#warp)
40 |
41 | ## Examples
42 |
43 | [[find in source code]](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/__init__.py#L21)
44 |
45 | ```python
46 | class Examples():
47 | def __init__():
48 | ```
49 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: "MGT-python"
2 | repo_url: "https://github.com/fourMs/MGT-python/"
3 | site_dir: "site"
4 | docs_dir: "docs"
5 |
6 | theme:
7 | name: readthedocs
8 | include_sidebar: true
9 |
--------------------------------------------------------------------------------
/musicalgestures/3rdparty/windows/wget/wget.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/3rdparty/windows/wget/wget.exe
--------------------------------------------------------------------------------
/musicalgestures/_360video.py:
--------------------------------------------------------------------------------
1 | import os
2 | from enum import Enum
3 | from functools import partial
4 | from musicalgestures._video import MgVideo
5 | from musicalgestures._utils import ffmpeg_cmd, get_length, generate_outfilename
6 |
7 |
8 | class Projection(Enum):
9 | """
10 | same as https://ffmpeg.org/ffmpeg-filters.html#v360.
11 | """
12 |
13 | e = 0
14 | equirect = 1
15 | c3x2 = 2
16 | c6x1 = 3
17 | c1x6 = 4
18 | eac = 5 # Equi-Angular Cubemap.
19 | flat = 6
20 | gnomonic = 7
21 | rectilinear = 8 # Regular video.
22 | dfisheye = 9 # Dual fisheye.
23 | barrel = 10
24 | fb = 11
25 | barrelsplit = 12 # Facebook’s 360 formats.
26 | sg = 13 # Stereographic format.
27 | mercator = 14 # Mercator format.
28 | ball = 15 # Ball format, gives significant distortion toward the back.
29 | hammer = 16 # Hammer-Aitoff map projection format.
30 | sinusoidal = 17 # Sinusoidal map projection format.
31 | fisheye = 18 # Fisheye projection.
32 | pannini = 19 # Pannini projection.
33 | cylindrical = 20 # Cylindrical projection.
34 | perspective = 21 # Perspective projection. (output only)
35 | tetrahedron = 22 # Tetrahedron projection.
36 | tsp = 23 # Truncated square pyramid projection.
37 | he = 24
38 | hequirect = 25 # Half equirectangular projection.
39 | equisolid = 26 # Equisolid format.
40 | og = 27 # Orthographic format.
41 | octahedron = 28 # Octahedron projection.
42 | cylindricalea = 29
43 |
44 | equirectangular = 30 # extra option for equirectangular
45 | erp = 31
46 |
47 | def __str__(self):
48 | # collapse all aliases of erp
49 | if self.name in ["equirectangular", "erp", "e"]:
50 | return "equirect"
51 | else:
52 | return self.name
53 |
54 | def __eq__(self, other):
55 | # collapse all aliases of erp
56 | if self.name in ["equirectangular", "erp", "e", "equirect"] and other.name in [
57 | "equirectangular",
58 | "erp",
59 | "e",
60 | "equirect",
61 | ]:
62 | return True
63 | elif self.name == other.name:
64 | return True
65 | else:
66 | return False
67 |
68 |
69 | # TODO: add settings for cameras and files
70 | CAMERA = {
71 | "gopro max": {
72 | "ext": "360",
73 | "projection": Projection.eac,
74 | },
75 | "insta360 x3": {
76 | "ext": "insv",
77 | "projection": Projection.fisheye,
78 | },
79 | "garmin virb 360": {
80 | "ext": "MP4",
81 | "projection": Projection.erp,
82 | },
83 | "ricoh theta xs00": {
84 | "ext": "MP4",
85 | "projection": Projection.erp,
86 | },
87 | }
88 |
89 |
90 | class Mg360Video(MgVideo):
91 | """
92 | Class for 360 videos.
93 | """
94 |
95 | def __init__(
96 | self,
97 | filename: str,
98 | projection: str | Projection,
99 | camera: str = None,
100 | **kwargs,
101 | ):
102 | """
103 | Args:
104 | filename (str): Path to the video file.
105 | projection (str, Projection): Projection type.
106 | camera (str): Camera type.
107 | """
108 | super().__init__(filename, **kwargs)
109 | self.filename = os.path.abspath(self.filename)
110 | self.projection = self._parse_projection(projection)
111 |
112 | if camera is None:
113 | self.camera = None
114 | elif camera.lower() in CAMERA:
115 | self.camera = CAMERA[camera.lower()]
116 | else:
117 | raise Warning(f"Camera type '{camera}' not recognized.")
118 |
119 | # override self.show() with extra ipython_kwarg embed=True
120 | self.show = partial(self.show, embed=True)
121 |
122 | def convert_projection(
123 | self,
124 | target_projection: Projection | str,
125 | options: dict[str, str] = None,
126 | print_cmd: bool = False,
127 | ):
128 | """
129 | Convert the video to a different projection.
130 | Args:
131 | target_projection (Projection): Target projection.
132 | options (dict[str, str], optional): Options for the conversion. Defaults to None.
133 | print_cmd (bool, optional): Print the ffmpeg command. Defaults to False.
134 | """
135 | target_projection = self._parse_projection(target_projection)
136 |
137 | if target_projection == self.projection:
138 | print(f"{self} is already in target projection {target_projection}.")
139 | return
140 | else:
141 | output_name = generate_outfilename(
142 | f"{self.filename.split('.')[0]}_{target_projection}.mp4"
143 | )
144 |
145 | # parse options
146 | if options:
147 | options = ":".join(f"{k}={v}" for k, v in options.items())
148 | cmds = [
149 | "ffmpeg",
150 | "-i",
151 | self.filename,
152 | "-vf",
153 | f"v360={self.projection}:{target_projection}:{options}",
154 | output_name,
155 | ]
156 | else:
157 | cmds = [
158 | "ffmpeg",
159 | "-i",
160 | self.filename,
161 | "-vf",
162 | f"v360={self.projection}:{target_projection}",
163 | output_name,
164 | ]
165 |
166 | # execute conversion
167 | ffmpeg_cmd(
168 | cmds,
169 | get_length(self.filename),
170 | pb_prefix=f"Converting projection to {target_projection}:",
171 | print_cmd=print_cmd,
172 | )
173 | self.filename = output_name
174 | self.projection = target_projection
175 |
176 | def _parse_projection(self, projection: str | Projection):
177 | """
178 | Parse projection type.
179 | Args:
180 | projection (str): Projection type.
181 | """
182 | if isinstance(projection, str):
183 | try:
184 | return Projection[projection.lower()]
185 | except KeyError:
186 | raise ValueError(
187 | f"Projection type '{projection}' not recognized. See `Projection` for available options."
188 | )
189 | elif isinstance(projection, Projection):
190 | return projection
191 | else:
192 | raise TypeError(f"Unsupported projection type: '{type(projection)}'.")
193 |
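194 | # Usage sketch (illustrative only; "recording.360" is a hypothetical path):
195 | if __name__ == "__main__":
196 | video = Mg360Video("recording.360", projection="eac", camera="gopro max")
197 | video.convert_projection("fisheye", print_cmd=True)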
--------------------------------------------------------------------------------
/musicalgestures/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from musicalgestures._input_test import mg_input_test
3 | from musicalgestures._videoreader import mg_videoreader
4 | from musicalgestures._flow import Flow
5 | from musicalgestures._audio import MgAudio
6 | from musicalgestures._video import MgVideo
7 | from musicalgestures._360video import Mg360Video
8 | from musicalgestures._utils import (
9 | MgFigure,
10 | MgImage,
11 | convert,
12 | convert_to_mp4,
13 | get_framecount,
14 | ffmpeg_cmd,
15 | get_length,
16 | generate_outfilename,
17 | )
18 | from musicalgestures._mglist import MgList
19 |
20 |
21 | class Examples:
22 | def __init__(self):
23 | module_path = os.path.realpath(os.path.dirname(__file__)).replace("\\", "/")
24 | # module_path = os.path.abspath(os.path.dirname(__file__))
25 | self.dance = module_path + "/examples/dancer.avi"
26 | self.pianist = module_path + "/examples/pianist.avi"
27 | self.notebook = module_path + "/MusicalGesturesToolbox.ipynb"
28 |
29 |
30 | examples = Examples()
31 |
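32 | # Usage sketch with the bundled example video (the .motion() call assumes the
33 | # standard MgVideo binding documented elsewhere in this repository):
34 | if __name__ == "__main__":
35 | mv = MgVideo(examples.dance)
36 | mv.motion()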
--------------------------------------------------------------------------------
/musicalgestures/_blend.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os, subprocess
3 |
4 | from musicalgestures._utils import MgImage, generate_outfilename, get_framecount, get_length, ffmpeg_cmd
5 |
6 |
7 | def mg_blend_image(self, filename=None, mode='all_mode', component_mode='average', target_name=None, overwrite=False):
8 | """
9 | Finds and saves a blended image of an input video file using FFmpeg.
10 | The FFmpeg tblend (time blend) filter takes two consecutive frames from one single stream, and outputs the result obtained by blending the new frame on top of the old frame.
11 |
12 | Args:
13 | filename (str, optional): Path to the input video file. If None, the video file of the MgObject is used. Defaults to None.
14 | mode (str, optional): Set blend mode for a specific pixel component or all pixel components. Accepted options are 'c0_mode', 'c1_mode', 'c2_mode', 'c3_mode' and 'all_mode'. Defaults to 'all_mode'.
15 | component_mode (str, optional): Component mode of the FFmpeg tblend. Available values for component modes can be accessed here: https://ffmpeg.org/ffmpeg-filters.html#blend-1. Defaults to 'average'.
16 | target_name (str, optional): The name of the output video. Defaults to None (which assumes that the input filename with the component mode suffix should be used).
17 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
18 |
19 | Returns:
20 | MgImage: A new MgImage pointing to the output image file.
21 | """
22 |
23 | if filename is None:
24 | filename = self.filename
25 |
26 | of, fex = os.path.splitext(filename)
27 |
28 | if target_name is None:
29 | target_name = of + f'_{component_mode}.png'
30 | if not overwrite:
31 | target_name = generate_outfilename(target_name)
32 |
33 | # Get the number of frames
34 | frames = get_framecount(filename)
35 | # Get the number of times all frames can be divided
36 | divider = int(np.ceil(np.log(frames / 2) / np.log(2)))
37 |
38 | # Define ffmpeg command
39 | cmd = ['ffmpeg', '-y', '-i', self.filename]
40 |
41 | cmd_filter = ''
42 | # Set color mode
43 | if self.color:
44 | pixformat = 'gbrp'
45 | else:
46 | pixformat = 'gray'
47 | cmd_filter += f'format={pixformat},'
48 |
49 | # Set average blur
50 | if self.blur.lower() == 'average':
51 | cmd_filter += 'avgblur=sizeX=10:sizeY=10,'
52 |
53 | # Set frame blend every two frames
54 | cmd_filter += f'tblend={mode}={component_mode},framestep=2,' * divider + 'setpts=1*PTS'
55 | cmd_end = ['-frames:v', '1', target_name]
56 | cmd += ['-vf', cmd_filter] + cmd_end
57 |
58 | # Run the command using ffmpeg and wait for it to finish
59 | ffmpeg_cmd(cmd, get_length(self.filename), pb_prefix='Rendering blended image:')
60 |
61 | # Save result as the blended image for parent MgObject
62 | self.blend_image = MgImage(target_name)
63 |
64 | return self.blend_image
65 |
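66 | # Usage sketch ("dance.mp4" is a hypothetical path; this function is exposed as
67 | # MgVideo.blend, see its use in _subtract.py):
68 | if __name__ == "__main__":
69 | import musicalgestures
70 | blended = musicalgestures.MgVideo("dance.mp4").blend(component_mode='average')
71 | print(blended)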
--------------------------------------------------------------------------------
/musicalgestures/_centerface.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 |
5 | import musicalgestures
6 |
7 | class CenterFace(object):
8 |
9 | def __init__(self, landmarks=True):
10 |
11 | module_path = os.path.abspath(os.path.dirname(musicalgestures.__file__))
12 |
13 | self.landmarks = landmarks
14 | self.net = cv2.dnn.readNetFromONNX(module_path + '/models/centerface.onnx')
15 | self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = 0, 0, 0, 0
16 |
17 | def __call__(self, img, height, width, threshold=0.5):
18 | self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = self.transform(height, width)
19 | return self.inference_opencv(img, threshold)
20 |
21 | def inference_opencv(self, img, threshold):
22 | blob = cv2.dnn.blobFromImage(img, scalefactor=1.0, size=(self.img_w_new, self.img_h_new), mean=(0, 0, 0), swapRB=True, crop=False)
23 | self.net.setInput(blob)
24 | if self.landmarks:
25 | heatmap, scale, offset, lms = self.net.forward(["537", "538", "539", "540"])
26 | else:
27 | heatmap, scale, offset = self.net.forward(["535", "536", "537"])
28 | lms = None  # no landmark output requested; postprocess ignores it in this case
29 | return self.postprocess(heatmap, lms, offset, scale, threshold)
30 | def transform(self, h, w):
31 | img_h_new, img_w_new = int(np.ceil(h / 32) * 32), int(np.ceil(w / 32) * 32)
32 | scale_h, scale_w = img_h_new / h, img_w_new / w
33 | return img_h_new, img_w_new, scale_h, scale_w
34 |
35 | def postprocess(self, heatmap, lms, offset, scale, threshold):
36 | if self.landmarks:
37 | dets, lms = self.decode(heatmap, scale, offset, lms, (self.img_h_new, self.img_w_new), threshold=threshold)
38 | else:
39 | dets = self.decode(heatmap, scale, offset, None, (self.img_h_new, self.img_w_new), threshold=threshold)
40 | if len(dets) > 0:
41 | dets[:, 0:4:2], dets[:, 1:4:2] = dets[:, 0:4:2] / self.scale_w, dets[:, 1:4:2] / self.scale_h
42 | if self.landmarks:
43 | lms[:, 0:10:2], lms[:, 1:10:2] = lms[:, 0:10:2] / self.scale_w, lms[:, 1:10:2] / self.scale_h
44 | else:
45 | dets = np.empty(shape=[0, 5], dtype=np.float32)
46 | if self.landmarks:
47 | lms = np.empty(shape=[0, 10], dtype=np.float32)
48 | if self.landmarks:
49 | return dets, lms
50 | else:
51 | return dets
52 |
53 | def decode(self, heatmap, scale, offset, landmark, size, threshold=0.1):
54 | heatmap = np.squeeze(heatmap)
55 | scale0, scale1 = scale[0, 0, :, :], scale[0, 1, :, :]
56 | offset0, offset1 = offset[0, 0, :, :], offset[0, 1, :, :]
57 | c0, c1 = np.where(heatmap > threshold)
58 | if self.landmarks:
59 | boxes, lms = [], []
60 | else:
61 | boxes = []
62 | if len(c0) > 0:
63 | for i in range(len(c0)):
64 | s0, s1 = np.exp(scale0[c0[i], c1[i]]) * 4, np.exp(scale1[c0[i], c1[i]]) * 4
65 | o0, o1 = offset0[c0[i], c1[i]], offset1[c0[i], c1[i]]
66 | s = heatmap[c0[i], c1[i]]
67 | x1, y1 = max(0, (c1[i] + o1 + 0.5) * 4 - s1 / 2), max(0, (c0[i] + o0 + 0.5) * 4 - s0 / 2)
68 | x1, y1 = min(x1, size[1]), min(y1, size[0])
69 | boxes.append([x1, y1, min(x1 + s1, size[1]), min(y1 + s0, size[0]), s])
70 | if self.landmarks:
71 | lm = []
72 | for j in range(5):
73 | lm.append(landmark[0, j * 2 + 1, c0[i], c1[i]] * s1 + x1)
74 | lm.append(landmark[0, j * 2, c0[i], c1[i]] * s0 + y1)
75 | lms.append(lm)
76 | boxes = np.asarray(boxes, dtype=np.float32)
77 | keep = self.nms(boxes[:, :4], boxes[:, 4], 0.3)
78 | boxes = boxes[keep, :]
79 | if self.landmarks:
80 | lms = np.asarray(lms, dtype=np.float32)
81 | lms = lms[keep, :]
82 | if self.landmarks:
83 | return boxes, lms
84 | else:
85 | return boxes
86 |
87 | def nms(self, boxes, scores, nms_thresh):
88 | x1 = boxes[:, 0]
89 | y1 = boxes[:, 1]
90 | x2 = boxes[:, 2]
91 | y2 = boxes[:, 3]
92 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
93 | order = np.argsort(scores)[::-1]
94 | num_detections = boxes.shape[0]
95 | suppressed = np.zeros((num_detections,), dtype=bool)
96 |
97 | keep = []
98 | for _i in range(num_detections):
99 | i = order[_i]
100 | if suppressed[i]:
101 | continue
102 | keep.append(i)
103 |
104 | ix1 = x1[i]
105 | iy1 = y1[i]
106 | ix2 = x2[i]
107 | iy2 = y2[i]
108 | iarea = areas[i]
109 |
110 | for _j in range(_i + 1, num_detections):
111 | j = order[_j]
112 | if suppressed[j]:
113 | continue
114 |
115 | xx1 = max(ix1, x1[j])
116 | yy1 = max(iy1, y1[j])
117 | xx2 = min(ix2, x2[j])
118 | yy2 = min(iy2, y2[j])
119 | w = max(0, xx2 - xx1 + 1)
120 | h = max(0, yy2 - yy1 + 1)
121 |
122 | inter = w * h
123 | ovr = inter / (iarea + areas[j] - inter)
124 | if ovr >= nms_thresh:
125 | suppressed[j] = True
126 |
127 | return keep
128 |
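129 | # Usage sketch (synthetic black frame, so no detections are expected; real use feeds
130 | # video frames, cf. _blurfaces.py in this repository):
131 | if __name__ == "__main__":
132 | img = np.zeros((360, 640, 3), dtype=np.uint8)
133 | centerface = CenterFace(landmarks=True)
134 | dets, lms = centerface(img, height=360, width=640, threshold=0.5)
135 | print(len(dets), "face(s) detected")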
--------------------------------------------------------------------------------
/musicalgestures/_cropping_window.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import argparse
4 |
5 | global frame_mask, drawing, g_val, x_start, x_stop, y_start, y_stop
6 | x_start, y_start = -1, -1
7 | x_stop, y_stop = -1, -1
8 | drawing = False
9 |
10 |
11 | def draw_rectangle(event, x, y, flags, param):
12 | """
13 | Helper function to render a cropping window to the user in case of manual cropping, using cv2.
14 | """
15 | global x_start, y_start, x_stop, y_stop, drawing, frame_mask
16 | if event == cv2.EVENT_LBUTTONDOWN:
17 | frame_mask = np.zeros(param.shape)
18 | drawing = True
19 | x_start, y_start = x, y
20 |
21 | elif event == cv2.EVENT_MOUSEMOVE:
22 | if drawing:
23 | frame_mask = np.zeros(param.shape)
24 | cv2.rectangle(frame_mask, (x_start, y_start),
25 | (x, y), (g_val, g_val, g_val), 1)
26 |
27 | elif event == cv2.EVENT_LBUTTONUP:
28 | drawing = False
29 | x_stop, y_stop = x, y
30 | cv2.rectangle(frame_mask, (x_start, y_start),
31 | (x, y), (g_val, g_val, g_val), 1)
32 |
33 |
34 | parser = argparse.ArgumentParser(
35 | description='Create (memory-safe) user interface for manual cropping.')
36 |
37 | parser.add_argument('Path', metavar='path', type=str, help='path to file')
38 | parser.add_argument('Ratio', metavar='ratio', type=float, help='scale ratio')
39 | parser.add_argument('Width', metavar='width', type=int, help='scaled width')
40 | parser.add_argument('Height', metavar='height', type=int, help='scaled height')
41 |
42 | args = parser.parse_args()
43 |
44 | imgpath = args.Path
45 | scale_ratio = args.Ratio
46 | scaled_width, scaled_height = args.Width, args.Height
47 |
48 | frame = cv2.imread(imgpath)
49 | frame_scaled = cv2.resize(frame, (scaled_width, scaled_height))
50 | frame_mask = np.zeros(frame_scaled.shape)
51 | name_str = 'Draw rectangle and press "C" to crop'
52 | cv2.namedWindow(name_str, cv2.WINDOW_AUTOSIZE)
53 | cv2.setMouseCallback(name_str, draw_rectangle, param=frame_scaled)
54 | g_val = 220
55 | while True:
56 | cv2.imshow(name_str, frame_scaled*(frame_mask != g_val) +
57 | frame_mask.astype(np.uint8))
58 | k = cv2.waitKey(1) & 0xFF
59 | if k == ord('c') or k == ord('C'):
60 | break
61 | cv2.destroyAllWindows()
62 |
63 | if x_stop < x_start:
64 | temp = x_start
65 | x_start = x_stop
66 | x_stop = temp
67 | if y_stop < y_start:
68 | temp = y_start
69 | y_start = y_stop
70 | y_stop = temp
71 |
72 | w, h, x, y = x_stop - x_start, y_stop - y_start, x_start, y_start
73 |
74 | if scale_ratio < 1:
75 | w, h, x, y = [int(elem / scale_ratio) for elem in [w, h, x, y]]
76 |
77 | print(w, h, x, y)
78 |
--------------------------------------------------------------------------------
/musicalgestures/_directograms.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | from numba import jit
5 | import matplotlib.pyplot as plt
6 | from matplotlib import colors
7 |
8 | import musicalgestures
9 | from musicalgestures._filter import filter_frame
10 | from musicalgestures._utils import MgProgressbar, MgFigure, convert_to_avi, generate_outfilename
11 |
12 | HISTOGRAM_BINS = np.linspace(-np.pi, np.pi, 100)
13 |
14 | @jit(nopython=True)
15 | def matrix3D_norm(matrix):
16 | n, m, o = matrix.shape
17 | norm = np.zeros((n,m))
18 |
19 | for i in np.arange(n):
20 | for j in np.arange(m):
21 | norm[i][j] = np.sqrt(np.sum(np.abs(matrix[i][j]) ** 2)) # Frobenius norm
22 | return norm
23 |
24 | @jit(nopython=True)
25 | def directogram(optical_flow):
26 | norms = matrix3D_norm(optical_flow) # norm of the matrix
27 | # Compute angles for the optical flow of the input frame
28 | angles = np.arctan2(optical_flow[:, :, 1], optical_flow[:, :, 0])
29 | # Return the indices of the histogram bins to which each value in the angles array belongs
30 | angle_indicators = np.digitize(angles, HISTOGRAM_BINS[:-1])
31 | directogram = np.zeros((len(HISTOGRAM_BINS),))
32 | # Motion for each angle indicators is created by binning and summing optical flow vectors for every pixel
33 | for y in range(optical_flow.shape[0]):
34 | for x in range(optical_flow.shape[1]):
35 | directogram[angle_indicators[y, x]] += norms[y, x]
36 |
37 | return directogram
38 |
39 | def mg_directograms(self, title=None, filtertype='Adaptative', thresh=0.05, kernel_size=5, target_name=None, overwrite=False):
40 | """
41 | Compute a directogram to factor the magnitude of motion into different angles.
42 | Each column of the directogram is computed as a weighted histogram (over HISTOGRAM_BINS) of the optical-flow angles for one input frame.
43 |
44 | Source: Abe Davis -- [Visual Rhythm and Beat](http://www.abedavis.com/files/papers/VisualRhythm_Davis18.pdf) (section 4.1)
45 |
46 | Args:
47 | title (str, optional): Optionally add a title to the figure. Defaults to None, which uses 'Directogram (filter type: <filtertype>)' as the title.
48 | filtertype (str, optional): 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method. 'Adaptative' applies adaptive thresholding, where each threshold is a Gaussian-weighted sum over an 11x11 pixel neighbourhood. Defaults to 'Adaptative'.
49 | thresh (float, optional): Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
50 | kernel_size (int, optional): Size of structuring element. Defaults to 5.
51 | target_name (str, optional): Target output name for the directogram. Defaults to None (which assumes that the input filename with the suffix "_dg" should be used).
52 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
53 |
54 | Returns:
55 | MgFigure: A MgFigure object referring to the internal figure and its data.
56 | """
57 |
58 | of, fex = os.path.splitext(self.filename)
59 |
60 | if fex != '.avi':
61 | # first check if there already is a converted version, if not create one and register it to self
62 | if "as_avi" not in self.__dict__.keys():
63 | file_as_avi = convert_to_avi(of + fex, overwrite=overwrite)
64 | # register it as the avi version for the file
65 | self.as_avi = musicalgestures.MgVideo(file_as_avi)
66 | # point of and fex to the avi version
67 | of, fex = self.as_avi.of, self.as_avi.fex
68 | filename = of + fex
69 | else:
70 | filename = self.filename
71 |
72 | vidcap = cv2.VideoCapture(filename)
73 | fps = int(vidcap.get(cv2.CAP_PROP_FPS))
74 | length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
75 |
76 | pb = MgProgressbar(total=length, prefix='Rendering directogram:')
77 |
78 | directograms = []
79 | directogram_times = np.zeros((length-1,))
80 | ret, frame = vidcap.read()
81 | prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
82 |
83 | i = 0
84 |
85 | while vidcap.isOpened():
86 |
87 | ret, frame = vidcap.read()
88 |
89 | if ret == True:
90 | next_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
91 |
92 | if filtertype == 'Adaptative':
93 | next_frame = cv2.adaptiveThreshold(next_frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
94 | else:
95 | # Frame Thresholding: apply threshold filter and median filter (of `kernel_size`x`kernel_size`) to the frame.
96 | next_frame = filter_frame(next_frame, filtertype, thresh, kernel_size)
97 |
98 | # Renders a dense optical flow video of the input video file using `cv2.calcOpticalFlowFarneback()`.
99 | # The description of the matching parameters are taken from the cv2 documentation.
100 | optical_flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)
101 | directograms.append(directogram(optical_flow))
102 | directogram_times[i] = len(directograms) / fps
103 | prev_frame = next_frame
104 |
105 | else:
106 | pb.progress(length)
107 | break
108 |
109 | pb.progress(i)
110 | i += 1
111 |
112 | vidcap.release()
113 |
114 | # Create and save the figure
115 | fig, ax = plt.subplots(figsize=(12, 4), dpi=300)
116 | fig.patch.set_facecolor('white')
117 | fig.patch.set_alpha(1)
118 |
119 | # add title
120 | if title is None:
121 | title = f'Directogram (filter type: {filtertype})'
122 |
123 | fig.suptitle(title, fontsize=16)
124 |
125 | ax.imshow(np.array(directograms).T, extent=[directogram_times.min(), directogram_times.max(),
126 | HISTOGRAM_BINS.min(), HISTOGRAM_BINS.max()], norm=colors.PowerNorm(gamma=1.0/2.0), aspect='auto')
127 |
128 | ax.set_ylabel('Angle [Radians]')
129 | ax.set_xlabel('Time [Seconds]')
130 |
131 | if target_name == None:
132 | target_name = of + '_dg.png'
133 |
134 | else:
135 | # enforce png
136 | target_name = os.path.splitext(target_name)[0] + '.png'
137 | if not overwrite:
138 | target_name = generate_outfilename(target_name)
139 |
140 | plt.savefig(target_name, format='png', transparent=False)
141 | plt.close()
142 |
143 | # Create MgFigure
144 | data = {
145 | "FPS": fps,
146 | "path": self.of,
147 | "directogram times": directogram_times,
148 | "directogram": np.array(directograms),
149 | }
150 |
151 | mgf = MgFigure(
152 | figure=fig,
153 | figure_type='video.directogram',
154 | data=data,
155 | layers=None,
156 | image=target_name)
157 |
158 | return mgf
159 |
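160 | # Usage sketch ("dance.mp4" is a hypothetical path; assumes the MgVideo.directograms
161 | # binding used by the toolbox):
162 | if __name__ == "__main__":
163 | dg = musicalgestures.MgVideo("dance.mp4").directograms()
164 | print(dg.image)  # path to the saved *_dg.png figure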
--------------------------------------------------------------------------------
/musicalgestures/_filter.py:
--------------------------------------------------------------------------------
1 | from scipy.signal import medfilt2d
2 | import cv2
3 | import numpy as np
4 | import matplotlib
5 | from musicalgestures._utils import get_widthheight
6 |
7 |
8 | def filter_frame(motion_frame, filtertype, thresh, kernel_size):
9 | """
10 | Applies a threshold filter and then a median filter (of `kernel_size`x`kernel_size`) to an image or videoframe.
11 |
12 | Args:
13 | motion_frame (np.array(uint8)): Input motion image.
14 | filtertype (str): 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method.
15 | thresh (float): A number in the range of 0 to 1. Eliminates pixel values less than given threshold.
16 | kernel_size (int): Size of structuring element.
17 |
18 | Returns:
19 | np.array(uint8): The filtered frame.
20 | """
21 |
22 | if filtertype.lower() == 'regular':
23 | motion_frame = (motion_frame > thresh*255)*motion_frame
24 | motion_frame = medfilt2d(motion_frame, kernel_size)
25 | elif filtertype.lower() == 'binary':
26 | motion_frame = (motion_frame > thresh*255)*255
27 | motion_frame = medfilt2d(motion_frame.astype(np.uint8), kernel_size)
28 | elif filtertype.lower() == 'blob':
29 | motion_frame = cv2.erode(motion_frame, np.ones([kernel_size, kernel_size]), iterations=1)
30 | return motion_frame
31 |
32 | def filter_frame_ffmpeg(filename, cmd, color, blur, filtertype, threshold, kernel_size, use_median, invert=False):
33 | """Builds the ffmpeg filter-chain string (frame difference, threshold, optional blur/median) used by the motion functions."""
34 | cmd_filter = ''
35 |
36 | # set color mode
37 | if color:
38 | pixformat = 'gbrp'
39 | else:
40 | pixformat = 'gray'
41 | cmd_filter += f'format={pixformat},'
42 |
43 | # set blur
44 | if blur.lower() == 'average':
45 | cmd_filter += 'avgblur=sizeX=10:sizeY=10,'
46 |
47 | # set frame difference
48 | if filtertype.lower() == 'regular':
49 | cmd_filter += 'tblend=all_mode=difference[diff],'
50 | else:
51 | cmd_filter += 'tblend=all_mode=difference,'
52 |
53 | width, height = get_widthheight(filename)
54 |
55 | thresh_color = matplotlib.colors.to_hex([threshold, threshold, threshold])
56 | thresh_color = '0x' + thresh_color[1:]
57 |
58 | # set threshold
59 | if filtertype.lower() == 'regular':
60 | cmd += ['-f', 'lavfi', '-i', f'color={thresh_color},scale={width}:{height}',
61 | '-f', 'lavfi', '-i', f'color=black,scale={width}:{height}']
62 | cmd_filter += '[0:v][1][2][diff]threshold,'
63 | elif filtertype.lower() == 'binary':
64 | cmd += ['-f', 'lavfi', '-i', f'color={thresh_color},scale={width}:{height}', '-f', 'lavfi', '-i',
65 | f'color=black,scale={width}:{height}', '-f', 'lavfi', '-i', f'color=white,scale={width}:{height}']
66 | cmd_filter += 'threshold,'
67 | elif filtertype.lower() == 'blob':
68 | # cmd_filter += 'erosion,' # erosion is always 3x3 so we will hack it with a median filter with percentile=0 which will pick minimum values
69 | cmd_filter += f'median=radius={kernel_size}:percentile=0,'
70 |
71 | # set median
72 | if use_median and filtertype.lower() != 'blob': # makes no sense to median-filter the eroded video
73 | cmd_filter += f'median=radius={kernel_size},'
74 |
75 | if invert:
76 | cmd_filter += 'negate,'
77 |
78 | return cmd, cmd_filter
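79 |
80 | # Minimal sketch of how the helpers combine (values illustrative; 'dance.mp4' is a
81 | # hypothetical path that must exist, since get_widthheight probes it):
82 | if __name__ == "__main__":
83 | cmd = ['ffmpeg', '-y', '-i', 'dance.mp4']
84 | cmd, vf = filter_frame_ffmpeg('dance.mp4', cmd, color=False, blur='none', filtertype='binary', threshold=0.1, kernel_size=5, use_median=True)
85 | print(vf)  # format=gray,tblend=all_mode=difference,threshold,median=radius=5,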
--------------------------------------------------------------------------------
/musicalgestures/_grid.py:
--------------------------------------------------------------------------------
1 | import os, subprocess
2 | import cv2
3 | import numpy as np
4 | from musicalgestures._utils import MgImage, generate_outfilename, ffmpeg_cmd, get_length
5 |
6 | def mg_grid(self, height=300, rows=3, cols=3, padding=0, margin=0, target_name=None, overwrite=False, return_array=False):
7 | """
8 | Generates a grid image of video frames (a storyboard-like preview) using ffmpeg.
9 |
10 | Args:
11 | height (int, optional): Frame height, width is adjusted automatically to keep the correct aspect ratio. Defaults to 300.
12 | rows (int, optional): Number of rows of the grid. Defaults to 3.
13 | cols (int, optional): Number of columns of the grid. Defaults to 3.
14 | padding (int, optional): Padding size between the frames. Defaults to 0.
15 | margin (int, optional): Margin size for the grid. Defaults to 0.
16 | target_name (str, optional): Target output name for the grid image. Defaults to None (which assumes that the input filename with the suffix "_grid" should be used).
17 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
18 | return_array (bool, optional): Whether to return a numpy array instead of writing the grid image to disk. Defaults to False.
19 |
20 | Returns:
21 | MgImage: An MgImage object referring to the grid image, or np.ndarray if `return_array=True`.
22 | """
23 |
24 | of, fex = os.path.splitext(self.filename)
25 | if target_name is None:
26 | target_name = of + '_grid.png'
27 | else:
28 | # Enforce png
29 | target_name = os.path.splitext(target_name)[0] + '.png'
30 | if not overwrite:
31 | target_name = generate_outfilename(target_name)
32 |
33 | # Get the number of frames
34 | cap = cv2.VideoCapture(self.filename)
35 | nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
36 | nth_frame = int(nb_frames / (rows*cols))
37 |
38 | # Define the grid specifications
39 | width = int((float(self.width) / self.height) * height)
40 | grid = f"select=not(mod(n\,{nth_frame})),scale={width}:{height},tile={cols}x{rows}:padding={padding}:margin={margin}"
41 |
42 | # Declare the ffmpeg commands
43 | if return_array:
44 | cmd = ['ffmpeg', '-y', '-i', self.filename, '-frames', '1', '-q:v', '0', '-vf', grid]
45 | process = ffmpeg_cmd(cmd, get_length(self.filename), pb_prefix='Rendering video frame grid:', pipe='load')
46 |
47 | # Convert bytes to array and convert from BGR to RGB
48 | array = np.frombuffer(process.stdout, dtype=np.uint8).reshape([height*rows, int(width*cols), 3])[...,::-1]
49 |
50 | return array
51 | else:
52 | cmd = ['ffmpeg', '-i', self.filename, '-y', '-frames', '1', '-q:v', '0', '-vf', grid, target_name]
53 | ffmpeg_cmd(cmd, get_length(self.filename), pb_prefix='Rendering video frame grid:')
54 | # Initialize the MgImage object
55 | img = MgImage(target_name)
56 |
57 | return img
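58 |
59 | # Usage sketch ("dance.mp4" is a hypothetical path; assumes the MgVideo.grid binding
60 | # used by the toolbox):
61 | if __name__ == "__main__":
62 | import musicalgestures
63 | grid_img = musicalgestures.MgVideo("dance.mp4").grid(rows=4, cols=4, height=200)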
--------------------------------------------------------------------------------
/musicalgestures/_info.py:
--------------------------------------------------------------------------------
1 | import os, subprocess
2 | import pandas as pd
3 | from matplotlib import pyplot as plt
4 |
5 | from musicalgestures._utils import convert_to_mp4
6 |
7 |
8 | def mg_info(self, type=None, autoshow=True, overwrite=False):
9 | """
10 | Returns info about video/audio/format file using ffprobe.
11 |
12 | Args:
13 | type (str, optional): Type of information to retrieve. Possible choices are 'audio', 'video', 'format' or 'frame'. Defaults to None (which gives info about video, audio and format).
14 | autoshow (bool, optional): Whether to show the I/P/B frames figure automatically. Defaults to True. NB: The type argument needs to be set to 'frame'.
15 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
16 |
17 | Returns:
18 | pandas.DataFrame: the parsed ffprobe output (or None when type='frame' and autoshow=True, in which case the I/P/B frames figure is shown instead).
19 | """
20 |
21 | # Get streams and format information (https://ffmpeg.org/ffprobe.html)
22 | cmd = ["ffprobe", "-hide_banner", "-loglevel", "quiet", "-show_streams", "-show_format", self.filename]
23 | if type == 'frame':
24 | if self.fex != '.mp4':
25 | # Convert video file to mp4
26 | self.filename = convert_to_mp4(self.of + self.fex, overwrite=overwrite)
27 | self.of, self.fex = os.path.splitext(self.filename)
28 | cmd = ["ffprobe", "-hide_banner", "-loglevel", "quiet", "-v", "error", "-select_streams", "v:0", "-show_entries", "frame=pkt_size, pict_type", self.filename]
29 |
30 | process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
31 | try:
32 | out, _ = process.communicate(timeout=10)
33 | splitted = out.split('\n')
34 | except subprocess.TimeoutExpired:
35 | process.kill()
36 | out, err = process.communicate()
37 | splitted = out.split('\n')
38 |
39 | frame = []
40 |
41 | # Retrieve information and export it in a dictionary
42 | if type == 'frame':
43 | current_frame = {}
44 | for line in [i for i in splitted if i not in ('[SIDE_DATA]', '[/SIDE_DATA]', '')]:
45 | if line == '[/FRAME]':
46 | frame.append(current_frame)
47 | current_frame = {}
48 | elif line != '[FRAME]':
49 | pair = line.split('=')
50 | current_frame[pair[0]] = pair[1]
51 | else:
52 | pass
53 |
54 | ipb_frames = {
55 | 'frame index': range(len(frame)),
56 | 'size (bytes)': [int(f['pkt_size']) for f in frame],
57 | 'type': [f['pict_type'] for f in frame]
58 | }
59 |
60 | df = pd.DataFrame.from_dict(ipb_frames)
61 |
62 | if autoshow:
63 | fig, ax = plt.subplots(figsize=(12,4), dpi=300)
64 | fig.patch.set_facecolor('white') # make sure background is white
65 | fig.patch.set_alpha(1)
66 |
67 | for i, (label, series) in enumerate(df.groupby('type')):
68 | plot_frames(series, label, index=i)
69 |
70 | # Get handles and labels
71 | handles, labels = plt.gca().get_legend_handles_labels()
72 | order = [1,2,0] # specify order of items in legend
73 | # Add legend to plot
74 | ax.legend([handles[idx] for idx in order],[labels[idx] for idx in order])
75 | ax.set_xlabel('Frame index')
76 | ax.set_ylabel('Size (bytes)')
77 | fig.tight_layout()
78 | else:
79 | return df
80 |
81 | else:
82 | for info in splitted:
83 | if info in ("[STREAM]", "[SIDE_DATA]", "[FORMAT]"):
84 | frame.append(dict())
85 | elif info in ("[/STREAM]", "[/SIDE_DATA]", "[/FORMAT]", ""):
86 | # closing tags and empty lines carry no key=value data
87 | continue
88 | else:
89 | try:
90 | key, value = info.split('=')
91 | frame[-1][key] = value
92 | except ValueError:
93 | # lines without a single '=' become keys with empty values
94 | frame[-1][info] = ''
95 |
96 | if len(frame) > 3:
97 | # Merge video stream with side data dictionary
98 | frame[0] = {**frame[0], **frame[1]}
99 | frame.pop(1)
100 |
101 | # Create a pandas dataframe
102 | df = pd.DataFrame.from_dict(frame)
103 |
104 | df.insert(0, 'codec_type', df.pop('codec_type')) # move codec type column
105 | df.pop('index') # remove index column
106 | df = df[df.codec_type.notna()] # remove rows with nan values in codec_type column
107 |
108 | if type is not None:
109 | return df[df.codec_type == type]
110 | else:
111 | return df
112 |
113 |
114 | def plot_frames(df, label, color_list=['#636EFA','#00CC96','#EF553B'], index=0):
115 | xs = df['frame index']
116 | ys = df['size (bytes)']
117 | # Plot the bar plot
118 | plt.bar(xs, ys, label=label + '-Frames', width=1, color=color_list[index])
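119 |
120 | # Usage sketch ("dance.mp4" is a hypothetical path; assumes the MgVideo.info binding
121 | # used by the toolbox):
122 | if __name__ == "__main__":
123 | import musicalgestures
124 | df = musicalgestures.MgVideo("dance.mp4").info(type='video')
125 | print(df)  # one row per video stream, codec_type first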
--------------------------------------------------------------------------------
/musicalgestures/_input_test.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
3 | class Error(Exception):
4 | """Base class for exceptions in this module."""
5 | pass
6 |
7 |
8 | class InputError(Error):
9 | """
10 | Exception raised for errors in the input.
11 |
12 | Args:
13 | message (str): Explanation of the error.
14 | """
15 |
16 | def __init__(self, message):
17 | self.message = message
18 |
19 |
20 | def mg_input_test(filename, array, fps, filtertype, thresh, starttime, endtime, blur, skip, frames):
21 | """
22 | Gives feedback to user if initialization from input went wrong.
23 |
24 | Args:
25 | filename (str): Path to the input video file.
26 | array (np.ndarray, optional): Generates an MgVideo object from a video array. Defaults to None.
27 | fps (float, optional): The frequency at which consecutive images from the video array are captured or displayed. Defaults to None.
28 | filtertype (str): 'Regular' turns all values below `thresh` to 0. 'Binary' turns all values below `thresh` to 0, above `thresh` to 1. 'Blob' removes individual pixels with erosion method.
29 | thresh (float): A number in the range of 0 to 1. Eliminates pixel values less than given threshold.
30 | starttime (int/float): Trims the video from this start time (s).
31 | endtime (int/float): Trims the video until this end time (s).
32 | blur (str): 'Average' to apply a 10px * 10px blurring filter, 'None' otherwise.
33 | skip (int): Every n frames to discard. `skip=0` keeps all frames, `skip=1` skips every other frame.
34 | frames (int): Specify a fixed target number of frames to extract from the video.
35 |
36 | Raises:
37 | InputError: If the types or options are wrong in the input.
38 | """
39 |
40 | # Check if FFmpeg is installed
41 | try:
42 | subprocess.check_call(['ffmpeg', '-version'])
43 | except (subprocess.CalledProcessError, OSError):
44 | msg = 'FFmpeg must be installed and accessible via the path environment variable.\nMore information on how to install FFmpeg: https://github.com/fourMs/MGT-python/wiki/0-%E2%80%90-Installation'
45 | raise InputError(msg)
46 |
47 | filenametest = isinstance(filename, str)
48 |
49 | if filenametest:
50 | if array is not None:
51 | if fps is None:
52 | msg = 'Please specify frame per second (fps) parameter for generating video from array.'
53 | raise InputError(msg)
54 |
55 | if filtertype.lower() not in ['regular', 'binary', 'blob']:
56 | msg = 'Please specify a filter type as str: "Regular", "Binary" or "Blob"'
57 | raise InputError(msg)
58 |
59 | if blur.lower() not in ['average', 'none']:
60 | msg = 'Please specify a blur type as str: "Average" or "None"'
61 | raise InputError(msg)
62 |
63 | if not isinstance(thresh, (float, int)):
64 | msg = 'Please specify a threshold as a float between 0 and 1.'
65 | raise InputError(msg)
66 |
67 | if not isinstance(starttime, (float, int)):
68 | msg = 'Please specify a starttime as a float.'
69 | raise InputError(msg)
70 |
71 | if not isinstance(endtime, (float, int)):
72 | msg = 'Please specify a endtime as a float.'
73 | raise InputError(msg)
74 |
75 | if not isinstance(skip, int):
76 | msg = 'Please specify a skip as an integer of frames you wish to skip (Max = N frames).'
77 | raise InputError(msg)
78 |
79 | if not isinstance(frames, int):
80 | msg = 'Please specify a frames as an integer of fixed frames you wish to keep.'
81 | raise InputError(msg)
82 |
83 | else:
84 | msg = 'Minimum input for this function: filename as a str.'
85 | raise InputError(msg)
86 |
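87 | # Failure sketch: an unknown filter type fails fast with a readable message, e.g.
88 | # mg_input_test("dance.mp4", None, None, "Gaussian", 0.05, 0, 0, "None", 0, 0)
89 | # -> InputError: Please specify a filter type as str: "Regular", "Binary" or "Blob"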
--------------------------------------------------------------------------------
/musicalgestures/_motionanalysis.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 |
5 | def centroid(image, width, height):
6 | """
7 | Computes the centroid and quantity of motion in an image or frame.
8 |
9 | Args:
10 | image (np.array(uint8)): The input image matrix for the centroid estimation function.
11 | width (int): The pixel width of the input video capture.
12 | height (int): The pixel height of the input video capture.
13 |
14 | Returns:
15 | np.array(2): X and Y coordinates of the centroid of motion.
16 | int: Quantity of motion: How large the change was in pixels.
17 | """
18 |
19 | image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
20 |
21 | x = np.arange(width)
22 | y = np.arange(height)
23 | # Calculates the sum of the pixels in the input image
24 | qom = cv2.sumElems(image)[0]
25 | mx = np.mean(image, axis=0)
26 | my = np.mean(image, axis=1)
27 |
28 | if np.sum(mx) != 0 and np.sum(my) != 0:
29 | comx = x.reshape(1, width)@mx.reshape(width, 1)/np.sum(mx)
30 | comy = y.reshape(1, height)@my.reshape(height, 1)/np.sum(my)
31 | else:
32 | comx = 0
33 | comy = 0
34 |
35 | com = np.zeros(2)
36 | com[0] = comx
37 | # The y-axis is flipped to fit a "normal" coordinate system
38 | com[1] = height-comy
39 |
40 | return com, int(qom)
41 |
42 | def area(motion_frame, height, width):
43 | """Computes the normalized bounding box of the largest contour of motion, the Area of Motion (AoM)."""
44 | aombite = []
45 | # Convert to gray scale
46 | gray = cv2.cvtColor(motion_frame, cv2.COLOR_BGR2GRAY)
47 | # Apply adaptive thresholding to the video frame to make differences more visible for contour detection
48 | thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 51, 2)
49 | contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
50 | # Get the largest contour to average the area of motion
51 | if len(contours) != 0:
52 | largest = contours[0]
53 | for contour in contours:
54 | if cv2.contourArea(contour) > cv2.contourArea(largest):
55 | largest = contour
56 | (x, y, w, h) = cv2.boundingRect(largest)
57 | # Append and normalize coordinates of the area of motion
58 | aombite.append([x/width, y/height, (x+w)/width,(y+h)/height])
59 | else:
60 | aombite.append([0,0,0,0])
61 |
62 | return aombite
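63 |
64 | # Minimal numeric sketch with a synthetic frame (not toolbox data):
65 | if __name__ == "__main__":
66 | frame = np.zeros((4, 6, 3), dtype=np.uint8)
67 | frame[1, 2] = 255  # one bright pixel at x=2, y=1
68 | com, qom = centroid(frame, width=6, height=4)
69 | print(com, qom)  # [2. 3.] 255 -- y is flipped to a "normal" coordinate system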
--------------------------------------------------------------------------------
/musicalgestures/_show_window.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import sys
4 | from musicalgestures._utils import unwrap_str
5 |
6 | if __name__ == '__main__':
7 | parser = argparse.ArgumentParser(description='Play video in a separate process')
8 | parser.add_argument('command', metavar='command', type=str, help='command')
9 | args = parser.parse_args()
10 | os.system(unwrap_str(args.command))
--------------------------------------------------------------------------------
/musicalgestures/_subtract.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import matplotlib
4 |
5 | import musicalgestures
6 | from musicalgestures._utils import generate_outfilename, pass_if_container_is, get_length, ffmpeg_cmd
7 |
8 | def mg_subtract(
9 | self,
10 | color=True,
11 | filtertype=None,
12 | threshold=0.05,
13 | blur=False,
14 | curves=0.15,
15 | use_median=False,
16 | kernel_size=5,
17 | bg_img=None,
18 | bg_color='#000000',
19 | target_name=None,
20 | overwrite=False):
21 | """
22 | Renders background subtraction using ffmpeg.
23 |
24 | Args:
25 | color (bool, optional): If False the input is converted to grayscale at the start of the process. This can significantly reduce render time. Defaults to True.
26 | filtertype (str, optional): 'Regular' turns all values below `threshold` to 0. 'Binary' turns all values below `threshold` to 0, above `threshold` to 1. 'Blob' removes individual pixels with erosion method. Defaults to None (no thresholding).
27 | threshold (float, optional): Eliminates pixel values less than given threshold. Ranges from 0 to 1. Defaults to 0.05.
28 | blur (bool, optional): Whether to apply a smartblur ffmpeg filter or not. Defaults to False.
29 | curves (float, optional): Position of the curves/equalisation threshold used to subtract the background. Ranges from 0 to 1. Defaults to 0.15.
30 | use_median (bool, optional): If True the algorithm applies a median filter on the thresholded frame-difference stream. Defaults to False.
31 | kernel_size (int, optional): Size of the median filter (if `use_median=True`) or the erosion filter (if `filtertype='blob'`). Defaults to 5.
32 | bg_img (str, optional): Path to a background image (.png) that needs to be subtracted from the video. If set to None, it uses an average image of all frames in the video. Defaults to None.
33 | bg_color (str, optional): Set the background color in the video file in hex value. Defaults to '#000000' (black).
34 | target_name (str, optional): Target output name for the subtracted video. Defaults to None.
35 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
36 |
37 | Returns:
38 | MgVideo: A MgVideo as subtract for parent MgVideo
39 | """
40 |
41 | of, fex = os.path.splitext(self.filename)
42 |
43 | if target_name is None:
44 | target_name = of + '_subtracted.avi'
45 |
46 | if not overwrite:
47 | target_name = generate_outfilename(target_name)
48 |
49 | width, height = self.width, self.height
50 |
51 | if bg_img is None:
52 | # Render an average image of the video file for background subtraction
53 | bg_img = musicalgestures.MgVideo(self.filename).blend(component_mode='average').filename
54 | else:
55 | # Check if background image extension is .png or not
56 | pass_if_container_is(".png", bg_img)
57 |
58 | # Set inputs and the matte in the chosen background color
59 | cmd = ['ffmpeg', '-y', '-i', bg_img, '-i', self.filename]
60 | cmd_end = ['-shortest', '-pix_fmt', 'yuv420p', target_name]
61 | cmd_filter = f'color={bg_color}:size={width}x{height} [matte];[1:0]'
62 |
63 | # Set color mode
64 | if color == True:
65 | pixformat = 'gbrp'
66 | else:
67 | pixformat = 'gray'
68 |
69 | cmd_filter += f'format={pixformat}, split[mask][video];[0:0][mask]'
70 |
71 | # Set frame difference
72 | if filtertype is not None:
73 | if filtertype.lower() == 'regular':
74 | cmd_filter += 'blend=all_mode=difference[diff],'
75 | else:
76 | cmd_filter += 'blend=all_mode=difference,'
77 | else:
78 | cmd_filter += 'blend=all_mode=difference,'
79 |
80 | thresh_color = matplotlib.colors.to_hex([threshold, threshold, threshold])
81 | thresh_color = '0x' + thresh_color[1:]
82 |
83 | # Set threshold
84 | if filtertype is not None:
85 | if filtertype.lower() == 'regular':
86 | cmd += ['-f', 'lavfi', '-i', f'color={thresh_color},scale={width}:{height}',
87 | '-f', 'lavfi', '-i', f'color=black,scale={width}:{height}']
88 | cmd_filter += '[1][diff]threshold,'
89 | elif filtertype.lower() == 'binary':
90 | cmd += ['-f', 'lavfi', '-i', f'color={thresh_color},scale={width}:{height}', '-f', 'lavfi', '-i',
91 | f'color=black,scale={width}:{height}', '-f', 'lavfi', '-i', f'color=white,scale={width}:{height}']
92 | cmd_filter += ' threshold,'
93 | elif filtertype.lower() == 'blob':
94 | # cmd_filter += 'erosion,' # erosion is always 3x3 so we will hack it with a median filter with percentile=0 which will pick minimum values
95 | cmd_filter += f'median=radius={kernel_size}:percentile=0,'
96 |
97 | # Set median
98 | if use_median and (filtertype is None or filtertype.lower() != 'blob'): # makes no sense to median-filter the eroded video
99 | cmd_filter += f'median=radius={kernel_size},'
100 |
101 | # Set curves and equalisation filtering to a range of values between 0.1 and 0.9
102 | new_curves = (((curves - 0) * (0.8 - 0.1)) / (1 - 0)) + 0.1
103 | cmd_filter += f"curves=m='0/0 {str(round(new_curves,2))}/0 {str(round(new_curves+0.1,2))}/1 1/1',"
104 |
105 | # Set blur
106 | if blur:
107 | cmd_filter += 'format=gray,smartblur=1,smartblur=3,'
108 |
109 | cmd_filter += f'format=gray [mask];[matte][video][mask] maskedmerge, format={pixformat}'
110 | cmd_filter = ['-filter_complex', cmd_filter]
111 | cmd = cmd + cmd_filter + cmd_end
112 |
113 | ffmpeg_cmd(cmd, get_length(self.filename), pb_prefix='Subtracting background:', stream=True)
114 |
115 | # Save subtracted video as subtract for parent MgVideo
116 | self.subtract = musicalgestures.MgVideo(target_name, color=color, returned_by_process=True)
117 |
118 | return self.subtract
119 |
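120 | # Usage sketch ("dance.mp4" is a hypothetical path); called directly here, though the
121 | # toolbox exposes it on MgVideo like the other mg_* functions:
122 | if __name__ == "__main__":
123 | mv = musicalgestures.MgVideo("dance.mp4")
124 | subtracted = mg_subtract(mv, color=False, filtertype='binary', threshold=0.1)
125 | print(subtracted.filename)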
--------------------------------------------------------------------------------
/musicalgestures/_videoadjust.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import os
4 | import musicalgestures
5 | from musicalgestures._utils import scale_num, scale_array, MgProgressbar, get_length, ffmpeg_cmd, has_audio, generate_outfilename, convert_to_mp4, convert_to_avi
6 |
7 |
8 | def contrast_brightness_ffmpeg(filename, contrast=0, brightness=0, target_name=None, overwrite=False):
9 | """
10 | Applies contrast and brightness adjustments on the source video using ffmpeg.
11 |
12 | Args:
13 | filename (str): Path to the video to process.
14 | contrast (int/float, optional): Increase or decrease contrast. Values range from -100 to 100. Defaults to 0.
15 | brightness (int/float, optional): Increase or decrease brightness. Values range from -100 to 100. Defaults to 0.
16 | target_name (str, optional): Defaults to None (which assumes that the input filename with the suffix "_cb" should be used).
17 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
18 |
19 | Returns:
20 | str: Path to the output video.
21 | """
22 | if contrast == 0 and brightness == 0:
23 | return
24 |
25 | of, fex = os.path.splitext(filename)
26 |
27 | if target_name is None:
28 | target_name = of + '_cb' + fex
29 | if not overwrite:
30 | target_name = generate_outfilename(target_name)
31 |
32 | # keeping values in sensible range
33 | contrast = np.clip(contrast, -100.0, 100.0)
34 | brightness = np.clip(brightness, -100.0, 100.0)
35 |
36 | # ranges are "handpicked" so that the results are close to the results of contrast_brightness_cv2 (deprecated)
37 | if contrast == 0:
38 | p_saturation, p_contrast, p_brightness = 1, 1, 0  # neutral values for ffmpeg's eq filter
39 | elif contrast > 0:
40 | p_saturation = scale_num(contrast, 0, 100, 1, 1.9)
41 | p_contrast = scale_num(contrast, 0, 100, 1, 2.3)
42 | p_brightness = scale_num(contrast, 0, 100, 0, 0.04)
43 | elif contrast < 0:
44 | p_saturation = scale_num(contrast, 0, -100, 1, 0)
45 | p_contrast = scale_num(contrast, 0, -100, 1, 0)
46 | p_brightness = 0
47 |
48 | if brightness != 0:
49 | p_brightness += brightness / 100
50 |
51 | cmd = ['ffmpeg', '-y', '-i', filename, '-vf',
52 | f'eq=saturation={p_saturation}:contrast={p_contrast}:brightness={p_brightness}', '-q:v', '3', "-c:a", "copy", target_name]
53 |
54 | ffmpeg_cmd(cmd, get_length(filename),
55 | pb_prefix='Adjusting contrast and brightness:')
56 |
57 | return target_name
58 |
59 |
60 | def skip_frames_ffmpeg(filename, skip=0, target_name=None, overwrite=False):
61 | """
62 | Time-shrinks the video by skipping (discarding) every n frames determined by `skip`.
63 | To discard half of the frames (i.e. double the speed of the video) use `skip=1`.
64 |
65 | Args:
66 | filename (str): Path to the video to process.
67 | skip (int, optional): Discard `skip` frames before keeping one. Defaults to 0.
68 | target_name (str, optional): Defaults to None (which assumes that the input filename with the suffix "_skip" should be used).
69 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
70 |
71 | Returns:
72 | str: Path to the output video.
73 | """
74 | if skip == 0:
75 | return
76 |
77 | of, fex = os.path.splitext(filename)
78 | fex = '.avi'
79 |
80 | pts_ratio = 1 / (skip+1)
81 | atempo_ratio = skip+1
82 |
83 | if target_name is None:
84 | target_name = of + '_skip' + fex
85 | if not overwrite:
86 | target_name = generate_outfilename(target_name)
87 |
88 | # original duration of the file is stored in the -metadata title variable
89 | if has_audio(filename):
90 | cmd = ['ffmpeg', '-y', '-i', filename, '-metadata', f'title={get_length(filename)}', '-filter_complex',
91 | f'[0:v]setpts={pts_ratio}*PTS[v];[0:a]atempo={atempo_ratio}[a]', '-map', '[v]', '-map', '[a]', '-q:v', '3', '-shortest', target_name]
92 | else:
93 | cmd = ['ffmpeg', '-y', '-i', filename, '-metadata', f'title={get_length(filename)}', '-filter_complex',
94 | f'[0:v]setpts={pts_ratio}*PTS[v]', '-map', '[v]', '-q:v', '3', target_name]
95 |
96 | ffmpeg_cmd(cmd, get_length(filename), pb_prefix='Skipping frames:')
97 |
98 | return target_name
99 |
100 | def fixed_frames_ffmpeg(filename, frames=0, target_name=None, overwrite=False):
101 | """
102 | Extracts a fixed target number of frames from the video.
103 | To extract only the keyframes of the video, set `frames=-1`.
104 |
105 | Args:
106 | filename (str): Path to the video to process.
107 | frames (int, optional): Number of frames to extract from the video. If set to -1, only the keyframes of the video are extracted. Defaults to 0.
108 | target_name (str, optional): Defaults to None (which assumes that the input filename with the suffix "_fixed" should be used).
109 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filename to avoid overwriting. Defaults to False.
110 |
111 | Returns:
112 | str: Path to the output video.
113 | """
114 | of, fex = os.path.splitext(filename)
115 |
116 | if fex != '.mp4':
117 | # Convert video to mp4
118 | filename = convert_to_mp4(of + fex, overwrite=overwrite)
119 | of, fex = os.path.splitext(filename)
120 |
121 | if target_name is None:
122 | target_name = of + '_fixed' + fex
123 | if not overwrite:
124 | target_name = generate_outfilename(target_name)
125 |
126 | cap = cv2.VideoCapture(filename)
127 | nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
128 | fps = int(cap.get(cv2.CAP_PROP_FPS))
129 |
130 | if frames == 0:
131 | return
132 |
133 | pts_ratio = frames / nb_frames
134 | atempo_ratio = 1 / pts_ratio
135 |
136 | # Extract only keyframes
137 | if frames == -1:
138 | cmd = ['ffmpeg', '-y', '-discard', 'nokey', '-i', filename, '-c', 'copy', 'temp.h264']
139 | ffmpeg_cmd(cmd, get_length(filename), pb_prefix='Extracting keyframes:')
140 | cmd = ['ffmpeg', '-y', '-r', str(fps), '-f', 'h264', '-i', 'temp.h264', '-c', 'copy', target_name]
141 | ffmpeg_cmd(cmd, get_length(filename), pb_prefix='Encoding temporary video file:')
142 | # Remove temporary h264 video file
143 | os.remove('temp.h264')
144 |
145 | return target_name
146 |
147 | if has_audio(filename):
148 | cmd = ['ffmpeg', '-y', '-i', filename, '-filter_complex',
149 | f'[0:v]setpts={pts_ratio}*PTS[v];[0:a]atempo={atempo_ratio}[a]', '-map', '[v]', '-map', '[a]', '-q:v', '3', '-shortest', target_name]
150 | else:
151 | cmd = ['ffmpeg', '-y', '-i', filename, '-filter_complex',
152 | f'[0:v]setpts={pts_ratio}*PTS[v]', '-map', '[v]', '-q:v', '3', target_name]
153 |
154 | ffmpeg_cmd(cmd, get_length(filename), pb_prefix='Fixing frames:')
155 |
156 | return target_name
157 |
158 |
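159 | # Usage sketch ("dance.mp4" is a hypothetical path): skip=1 halves the frame count
160 | # while atempo keeps any audio in sync:
161 | if __name__ == "__main__":
162 | out = skip_frames_ffmpeg("dance.mp4", skip=1)
163 | print(out)  # e.g. dance_skip.avi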
--------------------------------------------------------------------------------
/musicalgestures/_videograms.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | from musicalgestures._utils import MgProgressbar, MgImage, get_widthheight, get_framecount, get_length, ffmpeg_cmd, generate_outfilename
5 | from musicalgestures._mglist import MgList
6 | from musicalgestures._videoadjust import skip_frames_ffmpeg
7 | import math
8 |
9 |
10 | def videograms_ffmpeg(self, target_name_x=None, target_name_y=None, overwrite=False):
11 | """
12 | Renders horizontal and vertical videograms of the source video using ffmpeg. Averages videoframes by axes,
13 | and creates two images of the horizontal-axis and vertical-axis stacks. In these stacks, a single row or
14 | column corresponds to a frame from the source video, and the index of the row or column corresponds to
15 | the index of the source frame.
16 |
17 | Args:
18 | target_name_x (str, optional): Target output name for the videogram on the X axis. Defaults to None (which assumes that the input filename with the suffix "_vgx" should be used).
19 | target_name_y (str, optional): Target output name for the videogram on the Y axis. Defaults to None (which assumes that the input filename with the suffix "_vgy" should be used).
20 | overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
21 |
22 | Returns:
23 | MgList: An MgList with the MgImage objects referring to the horizontal and vertical videograms respectively.
24 | """
25 |
26 | width, height = get_widthheight(self.filename)
27 | framecount = get_framecount(self.filename)
28 |
29 | def calc_skipfactor(width, height, framecount):
30 | """
31 | Helper function to calculate the necessary frame-skipping to avoid integer overflow. This makes sure that we can successfully create videograms even on many-hours-long videos.
32 |
33 | Args:
34 | width (int): The width of the video.
35 | height (int): The height of the video.
36 | framecount (int): The number of frames in the video.
37 |
38 | Returns:
39 | list(int, int): The necessary dilation factors to apply on the video for the horizontal and vertical videograms, respectively.
40 | """
41 |
42 | intmax = 2147483647
43 | skipfactor_x = int(
44 | math.ceil(framecount*8 / (intmax / (height+128) - 1024)))
45 | skipfactor_y = int(
46 | math.ceil(framecount / (intmax / ((width*8)+1024) - 128)))
47 | return skipfactor_x, skipfactor_y
48 |
49 | testx, testy = calc_skipfactor(width, height, framecount)
50 |
51 | if testx > 1 or testy > 1:
52 | necessary_skipfactor = max([testx, testy])
53 | print(f'{os.path.basename(self.filename)} is too large to process. Applying minimal skipping necessary...')
54 |
55 | shortened_file = skip_frames_ffmpeg(self.filename, skip=necessary_skipfactor-1)
56 | skip_of = os.path.splitext(shortened_file)[0]
57 | framecount = get_framecount(shortened_file)
58 | length = get_length(shortened_file)
59 |
60 | if target_name_x is None:
61 | target_name_x = skip_of+'_vgx.png'
62 | if target_name_y is None:
63 | target_name_y = skip_of+'_vgy.png'
64 | if not overwrite:
65 | target_name_x = generate_outfilename(target_name_x)
66 | target_name_y = generate_outfilename(target_name_y)
67 |
68 | cmd = ['ffmpeg', '-y', '-i', shortened_file, '-vf',
69 | f'scale=1:{height}:sws_flags=area,normalize,tile={framecount}x1', '-aspect', f'{framecount}:{height}', '-frames', '1', target_name_y]
70 | ffmpeg_cmd(cmd, length, stream=False, pb_prefix="Rendering horizontal videogram:")
71 |
72 | cmd = ['ffmpeg', '-y', '-i', shortened_file, '-vf',
73 | f'scale={width}:1:sws_flags=area,normalize,tile=1x{framecount}', '-aspect', f'{width}:{framecount}', '-frames', '1', target_name_x]
74 | ffmpeg_cmd(cmd, length, stream=False, pb_prefix="Rendering vertical videogram:")
75 |
76 | # save results as MgImages at self.videogram_x and self.videogram_y for parent MgObject
77 | self.videogram_x = MgImage(target_name_x)
78 | self.videogram_y = MgImage(target_name_y)
79 |
80 | # return MgList([MgImage(target_name_x), MgImage(target_name_y)])
81 | return MgList(self.videogram_x, self.videogram_y)
82 |
83 |
84 | else:
85 | length = get_length(self.filename)
86 |
87 | if target_name_x is None:
88 | target_name_x = self.of + '_vgx.png'
89 | if target_name_y is None:
90 | target_name_y = self.of+'_vgy.png'
91 | if not overwrite:
92 | target_name_x = generate_outfilename(target_name_x)
93 | target_name_y = generate_outfilename(target_name_y)
94 |
95 | cmd = ['ffmpeg', '-y', '-i', self.filename, '-frames', '1', '-vf',
96 | f'scale=1:{height}:sws_flags=area,normalize,tile={framecount}x1', '-aspect', f'{framecount}:{height}', target_name_y]
97 | ffmpeg_cmd(cmd, length, stream=False, pb_prefix="Rendering horizontal videogram:")
98 |
99 | cmd = ['ffmpeg', '-y', '-i', self.filename, '-frames', '1', '-vf',
100 | f'scale={width}:1:sws_flags=area,normalize,tile=1x{framecount}', '-aspect', f'{width}:{framecount}', target_name_x]
101 | ffmpeg_cmd(cmd, length, stream=False, pb_prefix="Rendering vertical videogram:")
102 |
103 | # save results as MgImages at self.videogram_x and self.videogram_y for parent MgObject
104 | self.videogram_x = MgImage(target_name_x)
105 | self.videogram_y = MgImage(target_name_y)
106 |
107 | # return MgList([MgImage(target_name_x), MgImage(target_name_y)])
108 | return MgList(self.videogram_x, self.videogram_y)
109 |
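110 | # Usage sketch ("dance.mp4" is a hypothetical path; assumes the MgVideo.videograms
111 | # binding used by the toolbox):
112 | if __name__ == "__main__":
113 | import musicalgestures
114 | vgs = musicalgestures.MgVideo("dance.mp4").videograms()
115 | print(vgs)  # MgList of the horizontal and vertical videogram MgImages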
--------------------------------------------------------------------------------
/musicalgestures/_videoreader.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | from musicalgestures._videoadjust import skip_frames_ffmpeg, fixed_frames_ffmpeg, contrast_brightness_ffmpeg
5 | from musicalgestures._cropvideo import mg_cropvideo_ffmpeg
6 | from musicalgestures._utils import has_audio, convert_to_avi, rotate_video, convert_to_grayscale, extract_subclip, get_length, get_fps, get_framecount, get_widthheight
7 |
8 |
9 | class ReadError(Exception):
10 | """Base class for file read errors."""
11 | pass
12 |
13 |
14 | def mg_videoreader(
15 | filename,
16 | starttime=0,
17 | endtime=0,
18 | skip=0,
19 | frames=0,
20 | rotate=0,
21 | contrast=0,
22 | brightness=0,
23 | crop='None',
24 | color=True,
25 | keep_all=False,
26 | returned_by_process=False):
27 | """
28 | Reads in a video file, and optionally applies several different processes on it. These include:
29 | - trimming,
30 | - skipping,
31 | - fixing,
32 | - rotating,
33 | - applying brightness and contrast,
34 | - cropping,
35 | - converting to grayscale.
36 |
37 | Args:
38 | filename (str): Path to the input video file.
39 | starttime (int/float, optional): Trims the video from this start time (s). Defaults to 0.
40 | endtime (int/float, optional): Trims the video until this end time (s). Defaults to 0 (which will make the algorithm use the full length of the input video instead).
41 | skip (int, optional): Time-shrinks the video by skipping (discarding) every `skip` frames. Defaults to 0.
42 | frames (int, optional): Specifies a fixed target number of frames to extract from the video. Defaults to 0.
43 | rotate (int/float, optional): Rotates the video by `rotate` degrees. Defaults to 0.
44 | contrast (int/float, optional): Applies +/- 100 contrast to video. Defaults to 0.
45 | brightness (int/float, optional): Applies +/- 100 brightness to video. Defaults to 0.
46 | crop (str, optional): If 'manual', opens a window displaying the first frame of the input video file, where the user can draw a rectangle to which cropping is applied. If 'auto', the cropping function attempts to determine the area of significant motion and applies the cropping to that area. Defaults to 'None'.
47 | color (bool, optional): If False, converts the video to grayscale and makes every method operate in grayscale mode. Defaults to True.
48 | keep_all (bool, optional): If True, preserves the output video file of each preprocessing stage that is used. Defaults to False.
49 | returned_by_process (bool, optional): For internal use only; do not set it manually. Defaults to False.
50 |
51 | Returns:
52 | int: The number of frames in the output video file.
53 | int: The pixel width of the output video file.
54 | int: The pixel height of the output video file.
55 | int: The FPS (frames per second) of the output video file.
56 | float: The length of the output video file in seconds.
57 | str: The path to the output video file without its extension. The file name gets a suffix for each used process.
58 | str: The file extension of the output video file.
59 | bool: Whether the video has an audio track.
60 | """
61 |
62 | # Separate filename from file extension
63 | of, fex = os.path.splitext(filename)
64 |
65 | trimming = False
66 | skipping = False
67 | fixing = False
68 | rotating = False
69 | cbing = False
70 | cropping = False
71 |
72 | # Cut out relevant bit of video using starttime and endtime
73 | if starttime != 0 or endtime != 0:
74 | tmp_path = extract_subclip(filename, starttime, endtime, target_name=of + '_trim' + fex)
75 | of = os.path.splitext(tmp_path)[0]
76 | # of = of + '_trim'
77 | trimming = True
78 |
79 | if skip != 0:
80 | tmp_path = skip_frames_ffmpeg(of + fex, skip)
81 | if not keep_all and trimming:
82 | os.remove(of+fex)
83 |
84 | # of = of + '_skip'
85 | of, fex = os.path.splitext(tmp_path)
86 | skipping = True
87 |
88 | if frames != 0:
89 | tmp_path = fixed_frames_ffmpeg(of + fex, frames)
90 | if not keep_all and (skipping or trimming):
91 | os.remove(of+fex)
92 |
93 | # of gets its suffix from fixed_frames_ffmpeg
94 | of, fex = os.path.splitext(tmp_path)
95 | fixing = True
96 |
97 | length = get_framecount(of+fex)
98 | fps = get_fps(of+fex)
99 |
100 | # 0 means full length
101 | if endtime == 0:
102 | endtime = length/fps
103 |
104 | if rotate != 0:
105 | tmp_path = rotate_video(of + fex, rotate)
106 | if not keep_all and (fixing or skipping or trimming):
107 | os.remove(of + fex)
108 | of = os.path.splitext(tmp_path)[0]
109 | # of = of + '_rot'
110 | rotating = True
111 |
112 | # Apply contrast/brightness before the motion analysis
113 | if contrast != 0 or brightness != 0:
114 | tmp_path = contrast_brightness_ffmpeg(of+fex, contrast=contrast, brightness=brightness)
115 |
116 | if not keep_all and (rotating or fixing or skipping or trimming):
117 | os.remove(of + fex)
118 | # of = of + '_cb'
119 | of = os.path.splitext(tmp_path)[0]
120 | cbing = True
121 |
122 | # Crops video either manually or automatically
123 | if crop.lower() != 'none':
124 | tmp_path = mg_cropvideo_ffmpeg(of+fex, crop_movement=crop)
125 |
126 | if not keep_all and (cbing or rotating or fixing or skipping or trimming):
127 | os.remove(of + fex)
128 | of = os.path.splitext(tmp_path)[0]
129 | # of = of + '_crop'
130 | cropping = True
131 |
132 | if not color and not returned_by_process:
133 | tmp_path = convert_to_grayscale(of + fex)
134 | if not keep_all and (cropping or cbing or rotating or fixing or skipping or trimming):
135 | os.remove(of + fex)
136 | of = os.path.splitext(tmp_path)[0]
137 |
138 | width, height = get_widthheight(of+fex)
139 | video_has_audio_track = has_audio(of+fex)
140 |
141 | return length, width, height, fps, endtime, of, fex, video_has_audio_track
142 |
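A minimal usage sketch for `mg_videoreader`, assuming a local file `dance.avi` (hypothetical); the eight return values follow the docstring above:

```python
from musicalgestures._videoreader import mg_videoreader

# Trim seconds 2-20, discard frames with skip=3, and boost
# contrast/brightness before analysis (all values hypothetical).
length, width, height, fps, endtime, of, fex, has_audio = mg_videoreader(
    "dance.avi", starttime=2, endtime=20, skip=3, contrast=100, brightness=50)

print(f"{length} frames, {width}x{height} @ {fps} fps -> {of + fex}")
```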
--------------------------------------------------------------------------------
/musicalgestures/deprecated/_deprecated_show.py:
--------------------------------------------------------------------------------
1 | def show_async(command):
2 | """Helper function to show ffplay windows asynchronously"""
3 | import asyncio
4 |
5 | async def run_cmd(command):
6 | process = await asyncio.create_subprocess_shell(command)
7 | await process.communicate()
8 |
9 | try:
10 | loop = asyncio.get_running_loop()
11 | except RuntimeError:  # no running event loop (e.g. when called outside Jupyter/IPython)
12 | loop = None
13 |
14 | if loop and loop.is_running():
15 | tsk = loop.create_task(run_cmd(command))
16 | else:
17 | asyncio.run(run_cmd(command))
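A usage sketch, assuming `ffplay` is on the PATH and a local `dance.avi` exists; `-autoexit` closes the window when playback ends:

```python
# Spawn an ffplay window without blocking the caller.
show_async("ffplay -autoexit dance.avi")
```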
--------------------------------------------------------------------------------
/musicalgestures/deprecated/_motionhistory.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | from scipy.signal import medfilt2d
5 | from musicalgestures._motionanalysis import centroid
6 | from musicalgestures._filter import filter_frame
7 | from musicalgestures._utils import mg_progressbar, extract_wav, embed_audio_in_video
8 | import musicalgestures
9 |
10 |
11 | def mg_motionhistory(
12 | self,
13 | history_length=10,
14 | kernel_size=5,
15 | filtertype='Regular',
16 | thresh=0.05,
17 | blur='None',
18 | inverted_motionhistory=False):
19 | """
20 | Finds the difference in pixel value from one frame to the next in an input video,
21 | and saves the difference frame to a history tail. The history frames are summed up
22 | and normalized, and added to the current difference frame to show the history of
23 | motion.
24 |
25 | Parameters
26 | ----------
27 | - history_length : int, optional
28 |
29 | Default is 10. Number of frames to be saved in the history tail.
30 | - kernel_size : int, optional
31 |
32 | Default is 5. Size of structuring element.
33 | - filtertype : {'Regular', 'Binary', 'Blob'}, optional
34 |
35 | `Regular` turns all values below `thresh` to 0.
36 | `Binary` turns all values below `thresh` to 0, above `thresh` to 1.
37 | `Blob` removes individual pixels with erosion method.
38 | - thresh : float, optional
39 |
40 | A number in the range of 0 to 1. Default is 0.05.
41 | Eliminates pixel values less than given threshold.
42 | - blur : {'None', 'Average'}, optional
43 |
44 | `Average` to apply a 10px * 10px blurring filter, `None` otherwise.
45 | - inverted_motionhistory : bool, optional
46 |
47 | Default is `False`. If `True`, inverts colors of the motionhistory video.
48 |
49 | Outputs
50 | -------
51 | - `filename`_motionhistory.avi
52 |
53 | Returns
54 | -------
55 | - MgVideo
56 |
57 | A new MgVideo pointing to the output '_motionhistory' video file.
58 | """
59 | enhancement = 1  # Can be raised to make motion more visible; use with caution, as high values may overflow the 8-bit output.
60 | self.filtertype = filtertype
61 | self.thresh = thresh
62 | self.blur = blur
63 |
64 | vidcap = cv2.VideoCapture(self.of+self.fex)
65 | ret, frame = vidcap.read()
66 | #of = os.path.splitext(self.filename)[0]
67 | fex = os.path.splitext(self.filename)[1]
68 | fourcc = cv2.VideoWriter_fourcc(*'MJPG')
69 | out = cv2.VideoWriter(self.of + '_motionhistory' + fex,
70 | fourcc, self.fps, (self.width, self.height))
71 |
72 | ii = 0
73 | history = []
74 |
75 | if self.color == False:
76 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
77 |
78 | while(vidcap.isOpened()):
79 | if self.blur.lower() == 'average':
80 | prev_frame = cv2.blur(frame, (10, 10))
81 | elif self.blur.lower() == 'none':
82 | prev_frame = frame
83 |
84 | ret, frame = vidcap.read()
85 |
86 | if ret == True:
87 | if self.blur.lower() == 'average':
88 | # The higher these numbers the more blur you get
89 | frame = cv2.blur(frame, (10, 10))
90 |
91 | if self.color == False:
92 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
93 |
94 | frame = (np.array(frame)).astype(np.float64)
95 |
96 | if self.color == True:
97 | motion_frame_rgb = np.zeros([self.height, self.width, 3])
98 | for i in range(frame.shape[2]):
99 | motion_frame = (
100 | np.abs(frame[:, :, i]-prev_frame[:, :, i])).astype(np.float64)
101 | motion_frame = filter_frame(
102 | motion_frame, self.filtertype, self.thresh, kernel_size)
103 | motion_frame_rgb[:, :, i] = motion_frame
104 |
105 | if len(history) > 0:
106 | motion_history = motion_frame_rgb/(len(history)+1)
107 | else:
108 | motion_history = motion_frame_rgb
109 |
110 | for newframe in history:
111 | motion_history += newframe/(len(history)+1)
112 | # keep the history tail at most history_length frames long
113 | if len(history) >= history_length:
114 | history.pop(0)  # drop the oldest frame
115 | history.append(motion_frame_rgb)
116 | motion_history = motion_history.astype(
117 | np.uint64)  # truncate to integers (written out as uint8 below)
118 |
119 | else: # self.color = False
120 | motion_frame = (np.abs(frame-prev_frame)
121 | ).astype(np.float64)
122 | motion_frame = filter_frame(
123 | motion_frame, self.filtertype, self.thresh, kernel_size)
124 | if len(history) > 0:
125 | motion_history = motion_frame/(len(history)+1)
126 | else:
127 | motion_history = motion_frame
128 |
129 | for newframe in history:
130 | motion_history += newframe/(len(history)+1)
131 |
132 | # keep the history tail at most history_length frames long
133 | if len(history) >= history_length:
134 | history.pop(0)  # drop the oldest frame
135 |
136 | history.append(motion_frame)
137 | motion_history = motion_history.astype(np.uint64)
138 |
139 | if self.color == False:
140 | motion_history_rgb = cv2.cvtColor(
141 | motion_history.astype(np.uint8), cv2.COLOR_GRAY2BGR)
142 | else:
143 | motion_history_rgb = motion_history
144 | if inverted_motionhistory:
145 | out.write(cv2.bitwise_not(
146 | enhancement*motion_history_rgb.astype(np.uint8)))
147 | else:
148 | out.write(enhancement*motion_history_rgb.astype(np.uint8))
149 | else:
150 | mg_progressbar(self.length, self.length,
151 | 'Rendering motion history video:', 'Complete')
152 | break
153 | ii += 1
154 | mg_progressbar(ii, self.length,
155 | 'Rendering motion history video:', 'Complete')
156 |
157 | out.release()
158 | source_audio = extract_wav(self.of + self.fex)
159 | destination_video = self.of + '_motionhistory' + self.fex
160 | embed_audio_in_video(source_audio, destination_video)
161 | os.remove(source_audio)
162 |
163 | return musicalgestures.MgVideo(destination_video, color=self.color, returned_by_process=True)
164 |
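The history-averaging step at the heart of the loop above, isolated as a standalone NumPy sketch (frame shape and values are hypothetical):

```python
import numpy as np

history_length = 10
history = []  # tail of recent motion frames

def add_motion_frame(motion_frame):
    """Average the current motion frame with the stored tail,
    weighting all frames equally, then update the tail."""
    motion_history = motion_frame / (len(history) + 1)
    for old in history:
        motion_history += old / (len(history) + 1)
    if len(history) >= history_length:
        history.pop(0)  # drop the oldest frame
    history.append(motion_frame)
    return motion_history

frame = np.random.rand(480, 640)  # hypothetical grayscale motion frame
summed = add_motion_frame(frame)
```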
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/logos/RITMO_150px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/logos/RITMO_150px.png
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/logos/UiO_150px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/logos/UiO_150px.png
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/ipython_example.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/ipython_example.gif
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/mgt-python-promo.odg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/mgt-python-promo.odg
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/mgt-python-promo_wide-crop.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/mgt-python-promo_wide-crop.jpg
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/mgt-python.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/mgt-python.png
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/mgt-python_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/mgt-python_640.jpg
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/mgt-python_new.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/mgt-python_new.png
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/mgt-python_new_640.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/mgt-python_new_640.png
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/notebook-middle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/notebook-middle.png
--------------------------------------------------------------------------------
/musicalgestures/documentation/figures/promo/notebook-middle_150.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/documentation/figures/promo/notebook-middle_150.jpg
--------------------------------------------------------------------------------
/musicalgestures/examples/dancer.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/examples/dancer.avi
--------------------------------------------------------------------------------
/musicalgestures/examples/pianist.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/examples/pianist.avi
--------------------------------------------------------------------------------
/musicalgestures/examples/test_dance.py:
--------------------------------------------------------------------------------
1 | import musicalgestures
2 |
3 | # CREATE MODULE OBJECT: Here is an example call to create an MgVideo, using several parameters
4 | mg = musicalgestures.MgVideo("./musicalgestures/examples/dancer.avi", starttime=2,
5 | endtime=20, contrast=100, brightness=50)
6 |
7 | # USE MODULE METHOD: To run the motionvideo analysis, call the method on your video object
8 | mg.motion(inverted_motionvideo=False, inverted_motiongram=False,
9 | thresh=0.05, unit='seconds')
10 |
11 | # History video
12 | mg.history(history_length=25)
13 | # Motion history video
14 | mg.history(filename=mg.of + '_motion.avi', history_length=25)
15 |
16 | # Average image of original video
17 | # mg.blend(filename="./musicalgestures/examples/dancer.avi", component_mode='average')
18 |
19 | # Average image of pre-processed video
20 | mg.blend(component_mode='average')
21 |
22 | # Average image of motion video
23 | mg.blend(filename=mg.of + '_motion.avi', component_mode='average')
24 |
--------------------------------------------------------------------------------
/musicalgestures/examples/test_pianist.py:
--------------------------------------------------------------------------------
1 | import musicalgestures
2 |
3 | # CREATE MODULE OBJECT: Here is an example call to create an MgVideo, using several parameters
4 | mg = musicalgestures.MgVideo("./musicalgestures/examples/pianist.avi", color=False, crop='auto', skip=3)
5 | # USE MODULE METHOD: To run the motionvideo analysis, call the method on your video object,
6 | # then create the motion history by chaining history() onto the result of motion()
7 | mg.motion(inverted_motionvideo=True, inverted_motiongram=True,
8 | thresh=0.1, blur='Average').history(history_length=25)
9 |
10 | # Average image of original video
11 | # mg.blend(filename="./musicalgestures/examples/pianist.avi", component_mode='average')
12 |
13 | # Average image of pre-processed video
14 | mg.blend(component_mode='average')
15 |
16 | # Average image of motion video
17 | mg.blend(filename=mg.of + '_motion.avi', component_mode='average')
18 |
--------------------------------------------------------------------------------
/musicalgestures/models/centerface.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/musicalgestures/models/centerface.onnx
--------------------------------------------------------------------------------
/musicalgestures/pose/getBODY_25_here.bat:
--------------------------------------------------------------------------------
1 | :: Avoid printing all the comments in the Windows cmd
2 | @echo off
3 |
4 | echo Downloading body pose (BODY_25) model...
5 |
6 | SET WGET_EXE=..\3rdparty\windows\wget\wget.exe
7 | SET OPENPOSE_URL=https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/
8 | SET BODY_25_FOLDER=body_25/
9 |
10 | echo:
11 |
12 | SET BODY_25_MODEL=body25/pose_iter_584000.caffemodel
13 | %WGET_EXE% -c %OPENPOSE_URL%%BODY_25_MODEL% -P %BODY_25_FOLDER% --no-check-certificate
14 |
15 | echo Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getBODY_25_here.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (BODY_25) model
2 | wget -c "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/body25/pose_iter_584000.caffemodel" -P "body_25/" --no-check-certificate
3 | # Download finished.
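A Python equivalent of the shell script above, as a sketch using only the URL and target folder shown in the script (note that, unlike `wget -c --no-check-certificate`, this neither resumes partial downloads nor skips certificate checks):

```python
import os
import urllib.request

URL = ("https://www.uio.no/ritmo/english/research/labs/fourms/software/"
       "musicalgesturestoolbox/mgt-python/pose-models/body25/pose_iter_584000.caffemodel")

os.makedirs("body_25", exist_ok=True)
# Download the BODY_25 model into body_25/.
urllib.request.urlretrieve(URL, os.path.join("body_25", "pose_iter_584000.caffemodel"))
```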
--------------------------------------------------------------------------------
/musicalgestures/pose/getBODY_25_remote.bat:
--------------------------------------------------------------------------------
1 | :: Avoid printing all the comments in the Windows cmd
2 | @echo off
3 |
4 | echo Downloading body pose (BODY_25) model...
5 |
6 | SET WGET_EXE=%1
7 | SET OPENPOSE_URL=https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/
8 | SET BODY_25_FOLDER=%2
9 |
10 | echo:
11 |
12 | SET BODY_25_MODEL=body25/pose_iter_584000.caffemodel
13 | %WGET_EXE% -c %OPENPOSE_URL%%BODY_25_MODEL% -P %BODY_25_FOLDER% --no-check-certificate
14 |
15 | echo Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getBODY_25_remote.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (BODY_25) model
2 | wget -c "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/body25/pose_iter_584000.caffemodel" -P "$1" --no-check-certificate
3 | # Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getBODY_25_remote_colab.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (BODY_25) model
2 | wget -P "$1" "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/body25/pose_iter_584000.caffemodel" --no-check-certificate
3 | # Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getCOCO_here.bat:
--------------------------------------------------------------------------------
1 | :: Avoid printing all the comments in the Windows cmd
2 | @echo off
3 |
4 | echo Downloading body pose (COCO) model...
5 |
6 | SET WGET_EXE=..\3rdparty\windows\wget\wget.exe
7 | SET OPENPOSE_URL=https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/
8 | SET COCO_FOLDER=coco/
9 |
10 | echo:
11 |
12 | SET COCO_MODEL=coco/pose_iter_440000.caffemodel
13 | %WGET_EXE% -c %OPENPOSE_URL%%COCO_MODEL% -P %COCO_FOLDER% --no-check-certificate
14 |
15 | echo Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getCOCO_here.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (COCO) model
2 | wget -c "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/coco/pose_iter_440000.caffemodel" -P "coco/" --no-check-certificate
3 | # Download finished.
4 |
--------------------------------------------------------------------------------
/musicalgestures/pose/getCOCO_remote.bat:
--------------------------------------------------------------------------------
1 | :: Avoid printing all the comments in the Windows cmd
2 | @echo off
3 |
4 | echo Downloading body pose (COCO) model...
5 |
6 | SET WGET_EXE=%1
7 | SET OPENPOSE_URL=https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/
8 | SET COCO_FOLDER=%2
9 |
10 | echo:
11 |
12 | SET COCO_MODEL=coco/pose_iter_440000.caffemodel
13 | %WGET_EXE% -c %OPENPOSE_URL%%COCO_MODEL% -P %COCO_FOLDER% --no-check-certificate
14 |
15 | echo Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getCOCO_remote.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (COCO) model
2 | wget -c "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/coco/pose_iter_440000.caffemodel" -P "$1" --no-check-certificate
3 | # Download finished.
4 |
--------------------------------------------------------------------------------
/musicalgestures/pose/getCOCO_remote_colab.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (COCO) model
2 | wget -P "$1" "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/coco/pose_iter_440000.caffemodel" --no-check-certificate
3 | # Download finished.
4 |
--------------------------------------------------------------------------------
/musicalgestures/pose/getMPI_here.bat:
--------------------------------------------------------------------------------
1 | :: Avoid printing all the comments in the Windows cmd
2 | @echo off
3 |
4 | echo Downloading body pose (MPI) model...
5 |
6 | SET WGET_EXE=..\3rdparty\windows\wget\wget.exe
7 | SET OPENPOSE_URL=https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/
8 | SET MPI_FOLDER=mpi/
9 |
10 | echo:
11 |
12 | SET MPI_MODEL=mpi/pose_iter_160000.caffemodel
13 | %WGET_EXE% -c %OPENPOSE_URL%%MPI_MODEL% -P %MPI_FOLDER% --no-check-certificate
14 |
15 | echo Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getMPI_here.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (MPI) model
2 | wget -c "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/mpi/pose_iter_160000.caffemodel" -P "mpi/" --no-check-certificate
3 | # Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getMPI_remote.bat:
--------------------------------------------------------------------------------
1 | :: Avoid printing all the comments in the Windows cmd
2 | @echo off
3 |
4 | echo Downloading body pose (MPI) model...
5 |
6 | SET WGET_EXE=%1
7 | SET OPENPOSE_URL=https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/
8 | SET MPI_FOLDER=%2
9 |
10 | echo:
11 |
12 | SET MPI_MODEL=mpi/pose_iter_160000.caffemodel
13 | %WGET_EXE% -c %OPENPOSE_URL%%MPI_MODEL% -P %MPI_FOLDER% --no-check-certificate
14 |
15 | echo Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getMPI_remote.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (MPI) model
2 | wget -c "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/mpi/pose_iter_160000.caffemodel" -P "$1" --no-check-certificate
3 | # Download finished.
--------------------------------------------------------------------------------
/musicalgestures/pose/getMPI_remote_colab.sh:
--------------------------------------------------------------------------------
1 | # Downloading body pose (MPI) model
2 | wget -P "$1" "https://www.uio.no/ritmo/english/research/labs/fourms/software/musicalgesturestoolbox/mgt-python/pose-models/mpi/pose_iter_160000.caffemodel" --no-check-certificate
3 | # Download finished.
--------------------------------------------------------------------------------
/paper/figures/keyframe-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/paper/figures/keyframe-image.jpg
--------------------------------------------------------------------------------
/paper/figures/motiongram.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/paper/figures/motiongram.jpg
--------------------------------------------------------------------------------
/paper/paper.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Musical Gestures Toolbox for Python: A package for music-related analysis of video files'
3 | tags:
4 | - Python
5 | - musicology
6 | - music science
7 | - video analysis
8 | - computer vision
9 | authors:
10 | - name: Alexander Refsum Jensenius
11 | orcid: 0000-0003-0872-7098
12 | affiliation: 1
13 | - name: Balint Laczko
14 | affiliation: 1
15 | - name: Marcus Widmer
16 | affiliation: 1
17 | - name: Frida Furmyr
18 | affiliation: 1
19 | affiliations:
20 | - name: University of Oslo
21 | index: 1
22 | date: 26 August 2020
23 | bibliography: paper.bib
24 | ---
25 |
26 | # Summary
27 |
28 | Videos can be used to develop new visualisations for analysis. The aim of creating such alternative displays from video recordings is to uncover features, structures and similarities within the material itself, and in relation to, for example, score material. Three useful visualisation techniques are motion images, motion history images and motiongrams (\autoref{fig:motiongram}).
29 |
30 | MGT can generate both dynamic and static visualisations, as well as some quantitative data. A dynamic visualisation comes in the form of a video file, and includes *motion videos* and *motion history videos*. The difference between them is that the latter includes a trace of the motion over time. A static visualisation is an image. A *motion average image* is similar to keeping the shutter open on an analog camera, which will show everything that changed over time. There are also two types of spatiotemporal displays, that is, images that give a sense of both time and space. A *videogram* is made from the original video, while a *motiongram* is made from the motion video.
31 |
32 | In addition to the visualisations, MGT can also export some basic computer vision features, including *quantity of motion*, *centroid of motion*, and *area of motion*. These are fairly crude, and primarily useful if you have only one subject in the image.
33 |
34 | The foundation for the techniques implemented in the toolbox was presented in [@jensenius_actionsound:_2007].
35 |
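A minimal usage sketch of this workflow (the file path is hypothetical; the methods mirror `examples/test_dance.py` in the repository):

```python
import musicalgestures

mg = musicalgestures.MgVideo("dancer.avi")
mg.motion()      # motion video and motiongrams
mg.history()     # motion history video
mg.videograms()  # videograms from the source video
```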
36 |
37 | # Figures
38 |
39 | 
40 |
41 | 
42 |
43 |
44 | # Acknowledgements
45 |
46 | This work was supported by the University of Oslo and the Research Council of Norway through its Centers of Excellence scheme, project numbers 250698 and 262762.
47 |
48 | # References
49 |
--------------------------------------------------------------------------------
/presentation/README.md:
--------------------------------------------------------------------------------
1 | # MGT-python tutorial
2 |
3 | The presentation in this folder is automatically generated from the [Jupyter Notebook](https://github.com/fourMs/MGT-python/blob/master/musicalgestures/MusicalGesturesToolbox.ipynb) using the reveal.js framework. It is used in workshops where the learners type the commands on their own computers. If you don't want to type yourself (or copy and paste from the presentation), it is better to open the notebook itself and run the commands from there.
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | #from distutils.core import setup
3 | import pathlib
4 |
5 | # The directory containing this file
6 | HERE = pathlib.Path(__file__).parent
7 |
8 | # The text of the README file
9 | README = (HERE / "README.md").read_text()
10 |
11 |
12 | setup(
13 | name='musicalgestures',
14 | packages=['musicalgestures'],
15 | version='v1.3.2',
16 | license='GNU General Public License v3 (GPLv3)',
17 | description='Musical Gestures Toolbox for Python',
18 | long_description=README,
19 | long_description_content_type='text/markdown',
20 | include_package_data=True,
21 | package_data={'musicalgestures': [
22 | 'dance.avi', 'LICENSE', 'MusicalGesturesToolbox.ipynb', 'examples/*', 'pose/*']},
23 | author='University of Oslo fourMs Lab',
24 | author_email='a.r.jensenius@imv.uio.no',
25 | url='https://github.com/fourMs/MGT-python',
26 | download_url='https://github.com/fourMs/MGT-python/archive/v1.3.2.tar.gz',
27 | keywords=['Computer Vision', 'Motion Analysis',
28 | 'Musical Gestures', 'Video-Analysis'],
29 | install_requires=[
30 | 'numpy',
31 | 'pandas',
32 | 'matplotlib',
33 | 'opencv-python',
34 | 'scipy',
35 | 'scikit-image',
36 | 'librosa',
37 | 'ipython>=7.12'
38 | ],
39 | python_requires='~=3.7',
40 | classifiers=[
41 | 'Development Status :: 5 - Production/Stable',
42 | 'Intended Audience :: Science/Research',
43 | 'Topic :: Multimedia :: Video',
44 | 'Topic :: Software Development :: Libraries :: Python Modules',
45 | 'Topic :: Utilities',
46 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
47 | 'Programming Language :: Python :: 3.7',
48 | 'Programming Language :: Python :: 3.8',
49 | ]
50 | )
51 |
--------------------------------------------------------------------------------
/tests/test_audio.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | import musicalgestures
4 | from musicalgestures._utils import MgFigure, get_length, extract_subclip
5 |
6 |
7 | @pytest.fixture(scope="class")
8 | def testvideo_avi(tmp_path_factory):
9 | target_name = os.path.join(str(tmp_path_factory.mktemp("data")), "testvideo.avi")
10 | print(target_name)
11 | testvideo_avi = extract_subclip(musicalgestures.examples.dance, 5, 6, target_name=target_name)
12 | return testvideo_avi
13 |
14 | @pytest.fixture(scope="class")
15 | def testvideo_avi_silent(tmp_path_factory):
16 | target_name = os.path.join(str(tmp_path_factory.mktemp("data")), "testvideo.avi")
17 | target_name_silent = os.path.join(str(tmp_path_factory.mktemp("data")), "testvideo_silent.avi")
18 | testvideo_avi = extract_subclip(musicalgestures.examples.dance, 5, 6, target_name=target_name)
19 | cmd = ["ffmpeg", "-y", "-i", target_name, "-an", target_name_silent]
20 | musicalgestures._utils.ffmpeg_cmd(cmd, get_length(testvideo_avi), stream=False)
21 | return target_name_silent
22 |
23 |
24 | class Test_Audio:
25 | def test_init(self, testvideo_avi):
26 | my_audio = musicalgestures.MgAudio(testvideo_avi)
27 | assert os.path.basename(my_audio.filename) == "testvideo.avi"
28 | # assert my_audio.of == "testvideo"
29 | # assert my_audio.fex == ".avi"
30 | def test_no_audio(self, testvideo_avi_silent):
31 | assert musicalgestures.MgVideo(testvideo_avi_silent).audio is None
32 |
33 | class Test_Audio_Waveform:
34 | def test_target_name_is_none(self, testvideo_avi):
35 | result = musicalgestures.MgVideo(testvideo_avi).audio.waveform(target_name=None)
36 | assert type(result) == MgFigure
37 | assert result.figure_type == "audio.waveform"
38 | assert os.path.isfile(result.image) == True
39 | assert os.path.splitext(result.image)[1] == ".png"
40 |
41 | def test_target_name(self, testvideo_avi):
42 | tmp_folder = os.path.dirname(testvideo_avi)
43 | target_name = tmp_folder + "/result.png"
44 | result = musicalgestures.MgVideo(testvideo_avi).audio.waveform(target_name=target_name)
45 | assert type(result) == MgFigure
46 | assert result.figure_type == "audio.waveform"
47 | assert os.path.isfile(result.image) == True
48 | assert os.path.splitext(result.image)[1] == ".png"
49 | assert target_name == result.image
50 |
51 | def test_target_no_autoshow(self, testvideo_avi):
52 | result = musicalgestures.MgVideo(testvideo_avi).audio.waveform(autoshow=False)
53 | assert type(result) == MgFigure
54 | assert result.figure_type == "audio.waveform"
55 | assert os.path.isfile(result.image) == True
56 | assert os.path.splitext(result.image)[1] == ".png"
57 |
58 | class Test_Audio_Spectrogram:
59 | def test_target_name_is_none(self, testvideo_avi):
60 | result = musicalgestures.MgVideo(testvideo_avi).audio.spectrogram(target_name=None)
61 | assert type(result) == MgFigure
62 | assert result.figure_type == "audio.spectrogram"
63 | assert os.path.isfile(result.image) == True
64 | assert os.path.splitext(result.image)[1] == ".png"
65 |
66 | def test_target_name(self, testvideo_avi):
67 | tmp_folder = os.path.dirname(testvideo_avi)
68 | target_name = tmp_folder + "/result.png"
69 | result = musicalgestures.MgVideo(testvideo_avi).audio.spectrogram(target_name=target_name)
70 | assert type(result) == MgFigure
71 | assert result.figure_type == "audio.spectrogram"
72 | assert os.path.isfile(result.image) == True
73 | assert os.path.splitext(result.image)[1] == ".png"
74 | assert target_name == result.image
75 |
76 | def test_target_no_autoshow(self, testvideo_avi):
77 | result = musicalgestures.MgVideo(testvideo_avi).audio.spectrogram(autoshow=False)
78 | assert type(result) == MgFigure
79 | assert result.figure_type == "audio.spectrogram"
80 | assert os.path.isfile(result.image) == True
81 | assert os.path.splitext(result.image)[1] == ".png"
82 |
83 | class Test_Audio_Descriptors:
84 | def test_target_name_is_none(self, testvideo_avi):
85 | result = musicalgestures.MgVideo(testvideo_avi).audio.descriptors(target_name=None)
86 | assert type(result) == MgFigure
87 | assert result.figure_type == "audio.descriptors"
88 | assert os.path.isfile(result.image) == True
89 | assert os.path.splitext(result.image)[1] == ".png"
90 |
91 | def test_target_name(self, testvideo_avi):
92 | tmp_folder = os.path.dirname(testvideo_avi)
93 | target_name = tmp_folder + "/result.png"
94 | result = musicalgestures.MgVideo(testvideo_avi).audio.descriptors(target_name=target_name)
95 | assert type(result) == MgFigure
96 | assert result.figure_type == "audio.descriptors"
97 | assert os.path.isfile(result.image) == True
98 | assert os.path.splitext(result.image)[1] == ".png"
99 | assert target_name == result.image
100 |
101 | def test_target_no_autoshow(self, testvideo_avi):
102 | result = musicalgestures.MgVideo(testvideo_avi).audio.descriptors(autoshow=False)
103 | assert type(result) == MgFigure
104 | assert result.figure_type == "audio.descriptors"
105 | assert os.path.isfile(result.image) == True
106 | assert os.path.splitext(result.image)[1] == ".png"
107 |
108 | class Test_Audio_Tempogram:
109 | def test_target_name_is_none(self, testvideo_avi):
110 | result = musicalgestures.MgVideo(
111 | testvideo_avi).audio.tempogram(target_name=None)
112 | assert type(result) == MgFigure
113 | assert result.figure_type == "audio.tempogram"
114 | assert os.path.isfile(result.image) == True
115 | assert os.path.splitext(result.image)[1] == ".png"
116 |
117 | def test_target_name(self, testvideo_avi):
118 | tmp_folder = os.path.dirname(testvideo_avi)
119 | target_name = tmp_folder + "/result.png"
120 | result = musicalgestures.MgVideo(
121 | testvideo_avi).audio.tempogram(target_name=target_name)
122 | assert type(result) == MgFigure
123 | assert result.figure_type == "audio.tempogram"
124 | assert os.path.isfile(result.image) == True
125 | assert os.path.splitext(result.image)[1] == ".png"
126 | assert target_name == result.image
127 |
128 | def test_target_no_autoshow(self, testvideo_avi):
129 | result = musicalgestures.MgVideo(
130 | testvideo_avi).audio.tempogram(autoshow=False)
131 | assert type(result) == MgFigure
132 | assert result.figure_type == "audio.tempogram"
133 | assert os.path.isfile(result.image) == True
134 | assert os.path.splitext(result.image)[1] == ".png"
--------------------------------------------------------------------------------
/tests/test_average.py:
--------------------------------------------------------------------------------
1 | import musicalgestures
2 | import os
3 | import pytest
4 |
5 |
6 | @pytest.fixture(scope="class")
7 | def testvideo_avi(tmp_path_factory):
8 | target_name = str(tmp_path_factory.mktemp("data")).replace(
9 | "\\", "/") + "/testvideo.avi"
10 | testvideo_avi = musicalgestures._utils.extract_subclip(
11 | musicalgestures.examples.dance, 5, 6, target_name=target_name)
12 | return testvideo_avi
13 |
14 |
15 | @pytest.fixture(scope="class")
16 | def testvideo_mp4(tmp_path_factory):
17 | target_name = str(tmp_path_factory.mktemp("data")).replace(
18 | "\\", "/") + "/testvideo.avi"
19 | testvideo_avi = musicalgestures._utils.extract_subclip(
20 | musicalgestures.examples.dance, 5, 6, target_name=target_name)
21 | testvideo_mp4 = musicalgestures._utils.convert_to_mp4(testvideo_avi)
22 | os.remove(testvideo_avi)
23 | return testvideo_mp4
24 |
25 |
26 | class Test_Average:
27 | def test_normal_case(self):
28 | mg = musicalgestures.MgVideo(musicalgestures.examples.dance)
29 | result = mg.average()
30 | assert type(result) == musicalgestures._utils.MgImage
31 | assert os.path.isfile(result.filename) == True
32 | assert os.path.splitext(result.filename)[1] == ".png"
33 |
34 | def test_not_avi(self, testvideo_mp4):
35 | mg = musicalgestures.MgVideo(testvideo_mp4)
36 | result = mg.average()
37 | assert type(result) == musicalgestures._utils.MgImage
38 | assert os.path.isfile(result.filename) == True
39 | assert os.path.splitext(result.filename)[1] == ".png"
40 |
41 | def test_no_color(self):
42 | mg = musicalgestures.MgVideo(
43 | musicalgestures.examples.dance, color=False)
44 | result = mg.average()
45 | assert type(result) == musicalgestures._utils.MgImage
46 | assert os.path.isfile(result.filename) == True
47 | assert os.path.splitext(result.filename)[1] == ".png"
48 |
49 | def test_no_normalize(self, testvideo_avi):
50 | mg = musicalgestures.MgVideo(testvideo_avi)
51 | result = mg.average(normalize=False)
52 | assert type(result) == musicalgestures._utils.MgImage
53 | assert os.path.isfile(result.filename) == True
54 | assert os.path.splitext(result.filename)[1] == ".png"
55 |
--------------------------------------------------------------------------------
/tests/test_centroid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from musicalgestures._motionanalysis import *
3 |
4 |
5 | class Test_Centroid:
6 | def test_empty_image(self):
7 | image_in = np.zeros((1920, 1080, 3)).astype(np.uint8)
8 | result = centroid(image_in, 1920, 1080)
9 | assert type(result) == tuple
10 | assert result[0][0] == 0
11 | assert result[0][1] == 1080
12 |
--------------------------------------------------------------------------------
/tests/test_init.py:
--------------------------------------------------------------------------------
1 | import musicalgestures
2 |
3 |
4 | def test_repr():
5 | mg = musicalgestures.MgVideo(musicalgestures.examples.dance)
6 | assert mg.__repr__() == f"MgVideo('{musicalgestures.examples.dance}')"
7 |
--------------------------------------------------------------------------------
/tests/test_ssm.py:
--------------------------------------------------------------------------------
1 | # Placeholder tests for the SSM module (adapted from a pytest template).
2 | import pytest
3 |
4 | def test_always_passes():
5 | assert True
6 |
7 | @pytest.mark.xfail(reason="placeholder: intentionally failing template test")
8 | def test_always_fails():
9 | assert False
--------------------------------------------------------------------------------
/tests/test_videograms.py:
--------------------------------------------------------------------------------
1 | import musicalgestures
2 | import os
3 | import pytest
4 |
5 |
6 | @pytest.fixture(scope="class")
7 | def testvideo_avi(tmp_path_factory):
8 | target_name = str(tmp_path_factory.mktemp("data")).replace(
9 | "\\", "/") + "/testvideo.avi"
10 | testvideo_avi = musicalgestures._utils.extract_subclip(
11 | musicalgestures.examples.dance, 5, 6, target_name=target_name)
12 | return testvideo_avi
13 |
14 |
15 | @pytest.fixture(scope="class")
16 | def testvideo_mp4(tmp_path_factory):
17 | target_name = str(tmp_path_factory.mktemp("data")).replace(
18 | "\\", "/") + "/testvideo.avi"
19 | testvideo_avi = musicalgestures._utils.extract_subclip(
20 | musicalgestures.examples.dance, 5, 6, target_name=target_name)
21 | testvideo_mp4 = musicalgestures._utils.convert_to_mp4(testvideo_avi)
22 | os.remove(testvideo_avi)
23 | return testvideo_mp4
24 |
25 |
26 | class Test_videograms:
27 | def test_normal_case(self, testvideo_avi):
28 | mg = musicalgestures.MgVideo(testvideo_avi)
29 | result = mg.videograms()
30 | assert type(result) == musicalgestures.MgList
31 | for videogram in result:
32 | assert type(videogram) == musicalgestures.MgImage
33 | assert os.path.isfile(videogram.filename) == True
34 |
--------------------------------------------------------------------------------
/wiki_pics/average_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/average_example.png
--------------------------------------------------------------------------------
/wiki_pics/blur_faces.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/blur_faces.gif
--------------------------------------------------------------------------------
/wiki_pics/centroid-of-motion_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/centroid-of-motion_640.jpg
--------------------------------------------------------------------------------
/wiki_pics/chroma_ssm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/chroma_ssm.png
--------------------------------------------------------------------------------
/wiki_pics/colored_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/colored_waveform.png
--------------------------------------------------------------------------------
/wiki_pics/digital-video.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/digital-video.png
--------------------------------------------------------------------------------
/wiki_pics/directogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/directogram.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_filter02_mgx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_filter02_mgx.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_filter02_mgy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_filter02_mgy.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_filter10_mgx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_filter10_mgx.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_filter10_mgy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_filter10_mgy.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_filter50_mgx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_filter50_mgx.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_filter50_mgy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_filter50_mgy.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_nofilter_mgx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_nofilter_mgx.png
--------------------------------------------------------------------------------
/wiki_pics/filtering_nofilter_mgy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/filtering_nofilter_mgy.png
--------------------------------------------------------------------------------
/wiki_pics/flow_dense_example.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/flow_dense_example.gif
--------------------------------------------------------------------------------
/wiki_pics/flow_sparse_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/flow_sparse_example.png
--------------------------------------------------------------------------------
/wiki_pics/heatmap_faces.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/heatmap_faces.png
--------------------------------------------------------------------------------
/wiki_pics/history_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/history_example.png
--------------------------------------------------------------------------------
/wiki_pics/hpss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/hpss.png
--------------------------------------------------------------------------------
/wiki_pics/impact_detection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/impact_detection.png
--------------------------------------------------------------------------------
/wiki_pics/impact_envelopes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/impact_envelopes.png
--------------------------------------------------------------------------------
/wiki_pics/ipb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/ipb.png
--------------------------------------------------------------------------------
/wiki_pics/motion-image_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/motion-image_640.jpg
--------------------------------------------------------------------------------
/wiki_pics/motion_average_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/motion_average_example.png
--------------------------------------------------------------------------------
/wiki_pics/motion_plots.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/motion_plots.png
--------------------------------------------------------------------------------
/wiki_pics/motion_ssm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/motion_ssm.png
--------------------------------------------------------------------------------
/wiki_pics/motiongram_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/motiongram_640.jpg
--------------------------------------------------------------------------------
/wiki_pics/motionhistory_back_and_forth.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/motionhistory_back_and_forth.gif
--------------------------------------------------------------------------------
/wiki_pics/pianist_descriptors.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_descriptors.png
--------------------------------------------------------------------------------
/wiki_pics/pianist_mgy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_mgy.png
--------------------------------------------------------------------------------
/wiki_pics/pianist_spectrogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_spectrogram.png
--------------------------------------------------------------------------------
/wiki_pics/pianist_stacked_figures.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_stacked_figures.png
--------------------------------------------------------------------------------
/wiki_pics/pianist_tempogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_tempogram.png
--------------------------------------------------------------------------------
/wiki_pics/pianist_vgy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_vgy.png
--------------------------------------------------------------------------------
/wiki_pics/pianist_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pianist_waveform.png
--------------------------------------------------------------------------------
/wiki_pics/pose_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/pose_example.png
--------------------------------------------------------------------------------
/wiki_pics/quantity-of-motion_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/quantity-of-motion_640.jpg
--------------------------------------------------------------------------------
/wiki_pics/subtracted.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/subtracted.gif
--------------------------------------------------------------------------------
/wiki_pics/velocity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/velocity.png
--------------------------------------------------------------------------------
/wiki_pics/video_info_320.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/video_info_320.png
--------------------------------------------------------------------------------
/wiki_pics/videogrid_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/videogrid_example.png
--------------------------------------------------------------------------------
/wiki_pics/warp_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fourMs/MGT-python/88ca0f526fce071b1817f89ce6e5a439ca9f9111/wiki_pics/warp_curve.png
--------------------------------------------------------------------------------