├── pytest.ini ├── music ├── legacy │ ├── pieces │ │ ├── __init__.py │ │ └── testSong2.py │ ├── __init__.py │ ├── tables.py │ ├── IteratorSynth.py │ ├── classes.py │ └── CanonicalSynth.py ├── singing │ ├── __init__.py │ ├── bootstrap.py │ └── perform.py ├── structures │ ├── peals │ │ ├── __init__.py │ │ ├── base.py │ │ ├── peals.py │ │ └── plain_changes.py │ ├── __init__.py │ ├── symmetry.py │ └── permutations.py ├── core │ ├── filters │ │ ├── __init__.py │ │ ├── stretches.py │ │ ├── reverb.py │ │ ├── impulse_response.py │ │ ├── fade.py │ │ ├── adsr.py │ │ ├── loud.py │ │ └── localization.py │ ├── synths │ │ ├── __init__.py │ │ ├── noises.py │ │ └── envelopes.py │ ├── __init__.py │ ├── functions.py │ └── io.py ├── tables.py ├── __init__.py └── sequencer.py ├── requirements.txt ├── .gitignore ├── examples ├── singing_demo.py ├── thirty_notes.py ├── isynth.py ├── thirty_numpy_notes.py ├── chromatic_scale.py ├── noisy.py ├── campanology.py ├── binaural_beats.py ├── penta_effects.py └── geometric_music.py ├── notes.md ├── CHANGELOG.md ├── tests ├── test_tables.py ├── test_spectral.py ├── test_bootstrap.py ├── test_sequencer.py ├── test_perform_failures.py ├── test_envelopes.py ├── test_io.py ├── test_notes_extra.py ├── test_synths.py ├── test_additional.py ├── test_utils.py └── test_filters.py ├── LICENSE ├── pyproject.toml └── README.md /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | addopts = -p no:warnings 4 | -------------------------------------------------------------------------------- /music/legacy/pieces/__init__.py: -------------------------------------------------------------------------------- 1 | """Example pieces used for testing legacy synthesizers.""" 2 | 3 | from .testSong2 import TestSong2 4 | 5 | __all__ = [ 6 | 'TestSong2' 7 | ] 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | colorama==0.4.6 2 | matplotlib==3.7.1 3 | numpy==1.26.4 4 | percolation==0.2.dev0 5 | scipy==1.12.0 6 | setuptools==69.0.2 7 | sympy==1.12 8 | termcolor==2.4.0 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ipython_log.py 2 | *__pycache__* 3 | *.pyc 4 | *.pyo 5 | *.swp 6 | *.pickle 7 | *.ropeproject 8 | *.wav 9 | *.egg-info/ 10 | .DS_Store 11 | dist/ 12 | music/singing/ecantorix/ 13 | musicPacDir/ 14 | -------------------------------------------------------------------------------- /music/singing/__init__.py: -------------------------------------------------------------------------------- 1 | """Simple utilities for text-to-speech demo generation.""" 2 | 3 | from .bootstrap import get_engine, make_test_song, setup_engine 4 | 5 | __all__ = [ 6 | 'get_engine', 7 | 'setup_engine', 8 | 'make_test_song' 9 | ] 10 | -------------------------------------------------------------------------------- /music/structures/peals/__init__.py: -------------------------------------------------------------------------------- 1 | from .peals import Peals, print_peal 2 | from .plain_changes import PlainChanges 3 | from .base import GenericPeal 4 | 5 | __all__ = [ 6 | 'GenericPeal', 7 | 'Peals', 8 | 'PlainChanges', 9 | 'print_peal', 10 | ] 11 | -------------------------------------------------------------------------------- /examples/singing_demo.py: 
-------------------------------------------------------------------------------- 1 | """Synthesize a short sung phrase using the singing utilities.""" 2 | 3 | import music 4 | 5 | # Clone eCantorix engine if not already installed 6 | music.singing.setup_engine() 7 | 8 | # Render a short sung phrase inside the local cache folder 9 | music.singing.make_test_song() 10 | -------------------------------------------------------------------------------- /music/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """Legacy synthesizers maintained for backwards compatibility.""" 2 | 3 | from .CanonicalSynth import CanonicalSynth 4 | from .IteratorSynth import IteratorSynth 5 | from .classes import Being 6 | 7 | __all__ = [ 8 | 'Being', 9 | 'CanonicalSynth', 10 | 'IteratorSynth' 11 | ] 12 | -------------------------------------------------------------------------------- /notes.md: -------------------------------------------------------------------------------- 1 | * compare the core/functions.py with the file in MASS. 2 | * organize the routines better by importing functions to specific modules. E.g. core/vibrato.py may import every vibrato-related routine in functions.py 3 | * make examples of the available functionalities 4 | * split music into at least 3 packages, e.g.: structures, core, singing, utils 5 | * describe them as an article 6 | -------------------------------------------------------------------------------- /music/structures/__init__.py: -------------------------------------------------------------------------------- 1 | from .symmetry import ( 2 | dist, 3 | GenericPeal, 4 | InterestingPermutations, 5 | Peals, 6 | PlainChanges, 7 | print_peal, 8 | transpose_permutation 9 | ) 10 | __all__ = [ 11 | 'dist', 12 | 'InterestingPermutations', 13 | 'transpose_permutation', 14 | 'GenericPeal', 15 | 'Peals', 16 | 'PlainChanges', 17 | 'print_peal', 18 | ] 19 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [1.0.1] - 2025-07-21 2 | ### Added 3 | - `Sequencer` module for scheduling notes and writing renders. 4 | - `play_audio` utility for quickly previewing sonic vectors. 5 | - `singing_demo` and `binaural_beats` example scripts. 6 | - Mypy configuration and type hints across the package. 7 | 8 | ### Changed 9 | - WAV reading now detects bit depth automatically. 10 | - Various bug fixes and documentation improvements. 
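A minimal usage sketch of the `Sequencer` and `play_audio` additions listed above (the constructor and method names mirror the calls exercised in `tests/test_sequencer.py` and `tests/test_io.py`; the specific frequencies, timings, and output filename are illustrative only):

```python
import music

# Schedule two notes and render them to a PCM array
# (API as exercised by tests/test_sequencer.py).
seq = music.Sequencer(sample_rate=44100)
seq.add_note(freq=440, start=0.0, duration=0.5)
seq.add_note(freq=550, start=0.5, duration=0.5)
samples = seq.render()

# Preview the render and write it to disk
# (play_audio as exercised by tests/test_io.py).
music.play_audio(samples, sample_rate=44100)
seq.write('demo.wav')
```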
11 | 12 | -------------------------------------------------------------------------------- /music/structures/symmetry.py: -------------------------------------------------------------------------------- 1 | """Abstractions for musical permutations and symmetry operations.""" 2 | 3 | from .peals import GenericPeal, Peals, PlainChanges, print_peal 4 | from .permutations import dist, InterestingPermutations, transpose_permutation 5 | 6 | __all__ = [ 7 | 'dist', 8 | 'InterestingPermutations', 9 | 'transpose_permutation', 10 | 'GenericPeal', 11 | 'Peals', 12 | 'PlainChanges', 13 | 'print_peal', 14 | ] 15 | -------------------------------------------------------------------------------- /examples/thirty_notes.py: -------------------------------------------------------------------------------- 1 | from music.legacy import Being 2 | 3 | # 1) start a synth 4 | being = Being() 5 | 6 | # 2) set its parameters using sequences to be iterated through 7 | being.d_ = [1/2, 1/4, 1/4] # durations in seconds 8 | being.fv_ = [0, 1, 5, 15, 150, 1500, 15000] # vibrato frequency 9 | being.nu_ = [5] # vibrato depth in semitones (maximum deviation of pitch) 10 | being.f_ = [220, 330] # frequencies for the notes 11 | 12 | # 3) render the wavfile with 30 notes iterating though the lists above 13 | being.render(30, 'thirty_notes.wav') 14 | -------------------------------------------------------------------------------- /examples/isynth.py: -------------------------------------------------------------------------------- 1 | import music 2 | 3 | tables = music.tables.PrimaryTables() 4 | pe3 = music.structures.PlainChanges(3) 5 | music.structures.symmetry.print_peal(pe3.act(), [0]) 6 | freqs = sum(pe3.act([220, 440, 330]), []) 7 | 8 | isynth = music.legacy.IteratorSynth() 9 | isynth.fundamental_frequency_sequence = freqs 10 | isynth.tab_sequence = [tables.sine, tables.triangle, tables.square, tables.saw] 11 | 12 | pcm_samples = music.utils.horizontal_stack(*[isynth.renderIterate() 13 | for i in range(len(freqs))]) 14 | 15 | music.core.io.write_wav_mono(pcm_samples, 'isynth.wav') 16 | -------------------------------------------------------------------------------- /music/core/filters/__init__.py: -------------------------------------------------------------------------------- 1 | """Collection of audio filters used by the synthesis core.""" 2 | 3 | from .adsr import adsr, adsr_stereo, adsr_vibrato 4 | from .fade import cross_fade, fade 5 | from .impulse_response import fir, iir 6 | from .localization import localize, localize2, localize_linear 7 | from .loud import loud, louds 8 | from .reverb import reverb 9 | from .stretches import stretches 10 | 11 | __all__ = [ 12 | 'adsr', 13 | 'adsr_stereo', 14 | 'adsr_vibrato', 15 | 'cross_fade', 16 | 'fade', 17 | 'fir', 18 | 'iir', 19 | 'localize', 20 | 'localize2', 21 | 'localize_linear', 22 | 'loud', 23 | 'louds', 24 | 'reverb', 25 | 'stretches' 26 | ] 27 | -------------------------------------------------------------------------------- /examples/thirty_numpy_notes.py: -------------------------------------------------------------------------------- 1 | from music.legacy import Being 2 | from music.utils import horizontal_stack 3 | from music.core.io import write_wav_stereo 4 | 5 | # 1) start a ѕynth 6 | being = Being() 7 | 8 | # 3) Use numpy arrays directly and use them to concatenate and/or mix sounds: 9 | s1 = being.render(30) 10 | being.f_ += [440] 11 | being.fv_ = [1, 2, 3, 4, 5] 12 | s2 = being.render(30) 13 | 14 | # s1 then s2 then s1 and s2 at the same time, then at the 
same time but one in 15 | # each LR channel, then s1 times s2 reversed, then s1+s2 but jumping 6 samples 16 | # before using one: 17 | s3 = horizontal_stack(s1, s2, s1 + s2, (s1, s2), s1*s2[::-1], s1[::7] + 18 | s2[::7]) 19 | write_wav_stereo(s3, 'thirty_numpy_notes.wav') 20 | -------------------------------------------------------------------------------- /examples/chromatic_scale.py: -------------------------------------------------------------------------------- 1 | """ Simple script that writes a chromatic scale on a WAV file. """ 2 | 3 | import music 4 | 5 | scale = [ 6 | 261.63, # C4 7 | 277.18, # C#4 8 | 293.66, # D4 9 | 311.13, # D#4 10 | 329.63, # E4 11 | 349.23, # F4 12 | 369.99, # F#4 13 | 392.00, # G4 14 | 415.30, # G#4 15 | 440.00, # A4 16 | 466.16, # A#4 17 | 493.88 # B4 18 | ] 19 | 20 | sonic_vector = [] 21 | 22 | for note in scale: 23 | sound = music.core.synths.note(freq=note, 24 | duration=0.4) 25 | sonic_vector.append(sound) 26 | 27 | stack = music.utils.horizontal_stack(*sonic_vector) 28 | 29 | music.core.io.write_wav_mono(sonic_vector=stack, 30 | filename='chromatic_scale.wav') 31 | -------------------------------------------------------------------------------- /examples/noisy.py: -------------------------------------------------------------------------------- 1 | """ Simple script that writes a pentatonic scale on a WAV file 2 | with different effects. 3 | """ 4 | 5 | import music 6 | 7 | noises = ['brown', 'pink', 'white', 'blue', 'violet', 'black'] 8 | sonic_vector = [] 9 | silence = music.core.synths.silence(duration=0.4) 10 | beep = music.core.synths.note(duration=0.1) 11 | 12 | for noise in noises: 13 | sonic_vector.append(music.core.synths.noises.noise(noise_type=noise)) 14 | sonic_vector.append(silence) 15 | sonic_vector.append(beep) 16 | sonic_vector.append(silence) 17 | 18 | sonic_vector.append(music.core.synths.noises.gaussian_noise()) 19 | 20 | stack = music.utils.horizontal_stack(*sonic_vector) 21 | music.core.io.write_wav_stereo(sonic_vector=stack, 22 | filename='noisy.wav') 23 | -------------------------------------------------------------------------------- /tests/test_tables.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | from pathlib import Path 3 | import numpy as np 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | 7 | 8 | def load_module(name, relative_path): 9 | path = HERE / relative_path 10 | spec = importlib.util.spec_from_file_location(name, path) 11 | module = importlib.util.module_from_spec(spec) 12 | spec.loader.exec_module(module) 13 | return module 14 | 15 | tables = load_module('tables', 'music/tables.py') 16 | 17 | 18 | def test_primary_tables_shapes(): 19 | pt = tables.PrimaryTables(size=16) 20 | assert pt.sine.shape == (16,) 21 | assert pt.triangle.shape == (16,) 22 | assert pt.square.shape == (16,) 23 | assert pt.saw.shape == (16,) 24 | # sine first element is 0 and last is close to -step 25 | assert np.isclose(pt.sine[0], 0.0) 26 | 27 | -------------------------------------------------------------------------------- /tests/test_spectral.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from pathlib import Path 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | sys.path.insert(0, str(HERE)) 7 | 8 | import music 9 | 10 | 11 | def _dominant_freq(samples, sample_rate=44100): 12 | freqs = np.fft.rfftfreq(len(samples), 1 / sample_rate) 13 | spectrum = np.fft.rfft(samples) 14 | return 
freqs[np.argmax(np.abs(spectrum))] 15 | 16 | 17 | def test_note_peak_frequency(): 18 | note = music.note(freq=440, duration=0.1) 19 | peak = _dominant_freq(note) 20 | assert abs(peak - 440) <= 5 21 | 22 | 23 | def test_note_with_fm_peak_frequency(): 24 | note = music.note_with_fm(freq=440, duration=0.1, fm=5, max_fm_deviation=5) 25 | peak = _dominant_freq(note) 26 | assert abs(peak - 440) <= 5 27 | 28 | 29 | def test_note_with_glissando_peak_frequency(): 30 | note = music.note_with_glissando(start_freq=430, end_freq=450, duration=0.1) 31 | peak = _dominant_freq(note) 32 | assert abs(peak - 440) <= 5 33 | -------------------------------------------------------------------------------- /tests/test_bootstrap.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | from unittest.mock import patch 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | sys.path.insert(0, str(HERE)) 7 | 8 | import music.singing.bootstrap as bootstrap 9 | 10 | 11 | def test_setup_engine_invokes_git_clone_when_missing(): 12 | with patch.object(bootstrap.os.path, 'exists', return_value=False): 13 | with patch.object(bootstrap.subprocess, 'run') as mock_run: 14 | bootstrap.setup_engine(method='http') 15 | mock_run.assert_called_once_with( 16 | ['git', 'clone', 'https://github.com/ttm/ecantorix', bootstrap.ECANTORIXDIR], 17 | check=True 18 | ) 19 | 20 | 21 | def test_setup_engine_skips_when_dir_exists(): 22 | with patch.object(bootstrap.os.path, 'exists', return_value=True): 23 | with patch.object(bootstrap.subprocess, 'run') as mock_run: 24 | bootstrap.setup_engine(method='http') 25 | mock_run.assert_not_called() 26 | -------------------------------------------------------------------------------- /music/core/synths/__init__.py: -------------------------------------------------------------------------------- 1 | """Synthesis primitives for envelopes, notes, and noises.""" 2 | 3 | from .envelopes import am, tremolo, tremolos 4 | from .notes import ( 5 | note, note_with_doppler, note_with_fm, note_with_glissando, 6 | note_with_glissando_vibrato, note_with_phase, note_with_two_vibratos, 7 | note_with_two_vibratos_glissando, note_with_vibrato, 8 | note_with_vibrato_seq_localization, note_with_vibratos_glissandos, trill 9 | ) 10 | from .noises import gaussian_noise, noise, silence 11 | 12 | __all__ = [ 13 | 'am', 14 | 'gaussian_noise', 15 | 'note', 16 | 'note_with_doppler', 17 | 'note_with_fm', 18 | 'note_with_glissando', 19 | 'note_with_glissando_vibrato', 20 | 'note_with_phase', 21 | 'note_with_vibrato', 22 | 'note_with_two_vibratos', 23 | 'note_with_two_vibratos_glissando', 24 | 'note_with_vibratos_glissandos', 25 | 'note_with_vibrato_seq_localization', 26 | 'noise', 27 | 'silence', 28 | 'tremolo', 29 | 'tremolos', 30 | 'trill', 31 | ] 32 | -------------------------------------------------------------------------------- /tests/test_sequencer.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | HERE = Path(__file__).resolve().parents[1] 5 | sys.path.insert(0, str(HERE)) 6 | 7 | import music 8 | 9 | 10 | def test_sequencer_basic_mono(): 11 | seq = music.Sequencer(sample_rate=1000) 12 | seq.add_note(freq=440, start=0.0, duration=0.01) 13 | seq.add_note(freq=440, start=0.02, duration=0.01) 14 | data = seq.render() 15 | assert data.ndim == 1 16 | # length should accommodate last note at 0.02 sec plus duration 0.01 => 30 samples 17 | assert len(data) >= 30 18 | 19 | 
20 | def test_sequencer_stereo_spatial(): 21 | seq = music.Sequencer(sample_rate=1000) 22 | seq.add_note( 23 | freq=440, 24 | start=0.0, 25 | duration=0.01, 26 | spatial={"x": 0.1, "y": 0.1}, 27 | ) 28 | out = seq.render() 29 | assert out.shape[0] == 2 30 | 31 | 32 | def test_sequencer_write(tmp_path): 33 | seq = music.Sequencer(sample_rate=1000) 34 | seq.add_note(freq=220, start=0.0, duration=0.005) 35 | out_file = tmp_path / "seq.wav" 36 | seq.write(str(out_file)) 37 | assert out_file.exists() 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Renato Fabbri 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/test_perform_failures.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | from unittest.mock import patch, mock_open 4 | import subprocess 5 | import pytest 6 | 7 | HERE = Path(__file__).resolve().parents[1] 8 | sys.path.insert(0, str(HERE)) 9 | 10 | import music.singing.perform as perform 11 | 12 | 13 | def test_sing_raises_when_copy_fails(): 14 | with patch.object(perform, 'write_abc'), \ 15 | patch.object(perform.subprocess, 'run', side_effect=subprocess.CalledProcessError(1, ['cp'])), \ 16 | patch.object(perform.wavfile, 'read'), \ 17 | patch('builtins.open', mock_open()): 18 | with pytest.raises(RuntimeError): 19 | perform.sing() 20 | 21 | 22 | def test_sing_raises_when_make_fails(): 23 | def side_effect(args, check): 24 | if args[0] == 'cp': 25 | return None 26 | raise subprocess.CalledProcessError(1, args) 27 | 28 | with patch.object(perform, 'write_abc'), \ 29 | patch.object(perform.subprocess, 'run', side_effect=side_effect), \ 30 | patch.object(perform.wavfile, 'read'), \ 31 | patch('builtins.open', mock_open()): 32 | with pytest.raises(RuntimeError): 33 | perform.sing() 34 | -------------------------------------------------------------------------------- /examples/campanology.py: -------------------------------------------------------------------------------- 1 | import music 2 | 3 | """ 4 | Notice that you might relate a peal or any set of permutations to a sonic 5 | characteristic (frequency, duration, vibrato depth, vibrato frequency, attack 6 | duration, etc.) 
through at least 3 methods: 7 | 1) initiate a Being(), set its permutations to the permutation sequence, its 8 | domain to the values to be permuted, and its curseq to the name of theBeing 9 | sequence to be yielded by the permutation of the domain. 10 | 2) Achieve the sequence of values through peal.act() or just using permutation 11 | (domain) for all the permutations at hand. Then render the note directly 12 | (e.g. using M.core.V_) or passing the sequence of values to a synth, such 13 | as Being(). 14 | """ 15 | 16 | pe3 = music.structures.peals.PlainChanges(3) 17 | music.structures.symmetry.print_peal(pe3.act(), [0]) 18 | freqs = sum(pe3.act([220, 440, 330]), []) 19 | 20 | nnotes = len(freqs) 21 | 22 | being = music.legacy.Being() 23 | being.f_ = freqs 24 | being.render(nnotes, 'campanology_1.wav') 25 | 26 | # OR 27 | being = music.legacy.Being() 28 | being.domain = [220, 440, 330] 29 | being.perms = pe3.peal_direct 30 | being.f_ = [] 31 | being.curseq = 'f_' 32 | being.stay(nnotes) 33 | being.render(nnotes, 'campanology_2.wav') 34 | -------------------------------------------------------------------------------- /examples/binaural_beats.py: -------------------------------------------------------------------------------- 1 | """Generate a binaural beat using Music's synthesis primitives. 2 | 3 | This script creates two sine waves with a small frequency difference and 4 | applies a gentle tremolo to each channel. Listening to the resulting 5 | stereo file can help create a calm environment for relaxation or focus. 6 | """ 7 | 8 | import numpy as np 9 | import music 10 | 11 | BASE_FREQ = 440.0 # central frequency in Hz 12 | BEAT_FREQ = 4.0 # difference between left and right in Hz 13 | DURATION = 10.0 # seconds 14 | TREMOLO_FREQ = 0.5 # Hz, slow amplitude modulation 15 | 16 | left = music.note_with_phase( 17 | freq=BASE_FREQ - BEAT_FREQ / 2, 18 | duration=DURATION, 19 | waveform_table=music.tables.PrimaryTables().sine, 20 | ) 21 | right = music.note_with_phase( 22 | freq=BASE_FREQ + BEAT_FREQ / 2, 23 | duration=DURATION, 24 | waveform_table=music.tables.PrimaryTables().sine, 25 | ) 26 | 27 | left = music.tremolo( 28 | duration=DURATION, 29 | tremolo_freq=TREMOLO_FREQ, 30 | max_db_dev=3, 31 | sonic_vector=left, 32 | ) 33 | right = music.tremolo( 34 | duration=DURATION, 35 | tremolo_freq=TREMOLO_FREQ, 36 | max_db_dev=3, 37 | sonic_vector=right, 38 | ) 39 | 40 | stereo = np.vstack((left, right)) 41 | 42 | music.write_wav_stereo(stereo, "binaural_beats.wav") 43 | -------------------------------------------------------------------------------- /tests/test_envelopes.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from pathlib import Path 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | sys.path.insert(0, str(HERE)) 7 | 8 | import music 9 | 10 | 11 | # Test amplitude modulation envelope 12 | 13 | def test_am_envelope_range_and_application(): 14 | ns = 1000 15 | env = music.am(number_of_samples=ns, fm=50, max_amplitude=0.3, sonic_vector=None) 16 | assert env.min() >= 1 - 0.3 - 1e-6 17 | assert env.max() <= 1 + 0.3 + 1e-6 18 | 19 | wave = np.ones_like(env) 20 | modulated = music.am(number_of_samples=ns, fm=50, max_amplitude=0.3, sonic_vector=wave) 21 | assert np.max(modulated) > 1 22 | assert np.min(modulated) < 1 23 | 24 | 25 | # Test tremolo envelope 26 | 27 | def test_tremolo_envelope_range_and_application(): 28 | ns = 1000 29 | db_dev = 6 30 | env = music.tremolo(number_of_samples=ns, tremolo_freq=100, 
max_db_dev=db_dev, sonic_vector=None) 31 | min_val = 10 ** (-db_dev / 20) 32 | max_val = 10 ** (db_dev / 20) 33 | assert env.min() >= min_val - 1e-6 34 | assert env.max() <= max_val + 1e-6 35 | 36 | wave = np.ones_like(env) 37 | modulated = music.tremolo(number_of_samples=ns, tremolo_freq=100, max_db_dev=db_dev, sonic_vector=wave) 38 | assert np.max(modulated) > 1 39 | assert np.min(modulated) < 1 40 | 41 | -------------------------------------------------------------------------------- /music/singing/bootstrap.py: -------------------------------------------------------------------------------- 1 | """Download and configure the eCantorix engine.""" 2 | 3 | import os 4 | import subprocess 5 | from .perform import sing 6 | 7 | here = os.path.abspath(os.path.dirname(__file__)) 8 | ECANTORIXDIR = here + '/ecantorix' 9 | 10 | 11 | def get_engine(): 12 | """Return path to the local eCantorix engine.""" 13 | if not os.path.exists(ECANTORIXDIR): 14 | raise RuntimeError( 15 | "eCantorix engine not found. Run 'setup_engine()' to install it." 16 | ) 17 | return ECANTORIXDIR 18 | 19 | 20 | def setup_engine(method="http"): 21 | """Clone the eCantorix repository for local usage.""" 22 | if os.path.exists(ECANTORIXDIR): 23 | return 24 | 25 | if method == "http": 26 | repo_url = 'https://github.com/ttm/ecantorix' 27 | elif method == "ssh": 28 | repo_url = 'git@github.com:ttm/ecantorix.git' 29 | else: 30 | raise ValueError('method not understood') 31 | 32 | try: 33 | subprocess.run(['git', 'clone', repo_url, ECANTORIXDIR], check=True) 34 | except Exception as exc: 35 | raise RuntimeError(f'Failed to clone repository: {exc}') from exc 36 | return ECANTORIXDIR 37 | 38 | 39 | def make_test_song(): 40 | t = 1 41 | t2 = .5 42 | t4 = .25 43 | text = "hey ma bro, why fly while dive?" 44 | notes = 7, 0, 5, 7, 11, 12, 7, 0 45 | durs = t2, t2, t4, t4, t, t4, t2 46 | sing(text, notes, durs) 47 | -------------------------------------------------------------------------------- /music/legacy/tables.py: -------------------------------------------------------------------------------- 1 | """Legacy lookup tables for common waveforms.""" 2 | 3 | import numpy as np 4 | import pylab as plt 5 | 6 | 7 | class Basic: 8 | """ 9 | Provides primary tables for lookup, including sine, triangle, square, and 10 | saw wave periods. 11 | """ 12 | 13 | def __init__(self, size=2048): 14 | """ 15 | Initializes a Basic object. 16 | 17 | Parameters: 18 | size (int, optional): The size of the tables. Defaults to 2048. 19 | """ 20 | self.size = size 21 | self.make_tables(size) 22 | 23 | def make_tables(self, size): 24 | """ 25 | Creates sine, triangle, square, and saw wave periods. 26 | 27 | Parameters: 28 | size (int): The size of the tables. 29 | """ 30 | self.sine = np.sin(np.linspace(0, 2 * np.pi, size, endpoint=False)) 31 | self.saw = np.linspace(-1, 1, size) 32 | self.square = np.hstack((np.ones(size // 2) * -1, np.ones(size // 2))) 33 | foo = np.linspace(-1, 1, size // 2, endpoint=False) 34 | self.triangle = np.hstack((foo, foo * -1)) 35 | 36 | def draw_tables(self): 37 | """ 38 | Plots the sine, triangle, square, and saw wave periods. 
39 | """ 40 | plt.plot(self.sine, "-o") 41 | plt.plot(self.saw, "-o") 42 | plt.plot(self.square, "-o") 43 | plt.plot(self.triangle, "-o") 44 | plt.xlim(-self.size * 0.1, self.size * 1.1) 45 | plt.ylim(-1.1, 1.1) 46 | plt.show() 47 | -------------------------------------------------------------------------------- /tests/test_io.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from pathlib import Path 4 | from unittest.mock import patch 5 | import pytest 6 | from scipy.io import wavfile 7 | 8 | HERE = Path(__file__).resolve().parents[1] 9 | sys.path.insert(0, str(HERE)) 10 | 11 | import music 12 | 13 | 14 | def test_invalid_bit_depth_mono(): 15 | with pytest.raises(ValueError): 16 | music.write_wav_mono(np.zeros(10), filename="tmp.wav", bit_depth=24) 17 | 18 | 19 | def test_invalid_bit_depth_stereo(): 20 | with pytest.raises(ValueError): 21 | music.write_wav_stereo(np.zeros((2, 10)), filename="tmp.wav", bit_depth=24) 22 | 23 | 24 | def test_read_wav_16bit(tmp_path): 25 | path = tmp_path / "s16.wav" 26 | data = np.array([0, 1000, -1000], dtype=np.int16) 27 | wavfile.write(path, 8000, data) 28 | out = music.read_wav(str(path)) 29 | assert np.allclose(out, data.astype(np.float64) / (2 ** 15)) 30 | 31 | 32 | def test_read_wav_32bit(tmp_path): 33 | path = tmp_path / "s32.wav" 34 | data = np.array([0, 100000, -100000], dtype=np.int32) 35 | wavfile.write(path, 8000, data) 36 | out = music.read_wav(str(path)) 37 | assert np.allclose(out, data.astype(np.float64) / (2 ** 31)) 38 | 39 | 40 | def test_play_audio_invocation(): 41 | import types 42 | from unittest.mock import MagicMock 43 | 44 | sd = types.SimpleNamespace(play=MagicMock(), wait=MagicMock()) 45 | with patch.dict(sys.modules, {"sounddevice": sd}): 46 | music.play_audio(np.zeros(4), sample_rate=8000) 47 | sd.play.assert_called_once() 48 | sd.wait.assert_called_once() 49 | -------------------------------------------------------------------------------- /examples/penta_effects.py: -------------------------------------------------------------------------------- 1 | """ Simple script that writes a pentatonic scale on a WAV file 2 | with different effects. 
3 | """ 4 | 5 | import music 6 | 7 | scale = [ 8 | 261.63, # C4 9 | 293.66, # D4 10 | 329.63, # E4 11 | 392.00, # G4 12 | 440.00 # A4 13 | ] 14 | 15 | sonic_vector = [] 16 | for note in scale: 17 | sound = music.core.synths.note(freq=note, 18 | duration=0.4) 19 | sonic_vector.append(sound) 20 | 21 | sonic_vector.append(music.core.synths.silence()) 22 | 23 | for note in scale: 24 | sound = music.core.synths.note_with_glissando(start_freq=note, 25 | end_freq=note+30, 26 | duration=0.4) 27 | sonic_vector.append(sound) 28 | 29 | sonic_vector.append(music.core.synths.silence()) 30 | 31 | for note in scale: 32 | sound = music.core.synths.note_with_vibrato(freq=note, 33 | duration=0.4) 34 | sonic_vector.append(sound) 35 | 36 | sonic_vector.append(music.core.synths.silence()) 37 | 38 | for note in scale: 39 | sound = music.core.synths.note_with_doppler(freq=note, 40 | duration=0.4) 41 | sonic_vector.append(sound) 42 | 43 | sonic_vector.append(music.core.synths.silence()) 44 | 45 | for note in scale: 46 | sound = music.core.synths.note_with_fm(freq=note, 47 | duration=0.4) 48 | sonic_vector.append(sound) 49 | 50 | stack = music.utils.horizontal_stack(*sonic_vector) 51 | music.core.io.write_wav_stereo(sonic_vector=stack, 52 | filename='penta_effects.wav') 53 | -------------------------------------------------------------------------------- /tests/test_notes_extra.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from pathlib import Path 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | sys.path.insert(0, str(HERE)) 7 | 8 | from music.core.synths.notes import ( 9 | note_with_doppler, 10 | note_with_fm, 11 | note_with_phase, 12 | note_with_glissando, 13 | note_with_glissando_vibrato, 14 | note_with_two_vibratos_glissando, 15 | note_with_vibrato, 16 | note_with_two_vibratos, 17 | ) 18 | from music.core.synths.envelopes import tremolo, tremolos 19 | 20 | 21 | def test_extra_note_functions_shapes(): 22 | params = dict(number_of_samples=10, sample_rate=100) 23 | assert note_with_doppler(**params).shape == (2, 10) 24 | assert note_with_fm(fm=0, max_fm_deviation=0, **params).shape == (10,) 25 | assert note_with_phase(phase=0, **params).shape == (10,) 26 | assert note_with_glissando(start_freq=220, end_freq=220, **params).shape == (10,) 27 | assert note_with_glissando_vibrato( 28 | start_freq=220, 29 | end_freq=220, 30 | vibrato_freq=0, 31 | max_pitch_dev=0, 32 | **params 33 | ).shape == (10,) 34 | assert note_with_two_vibratos_glissando( 35 | start_freq=220, 36 | end_freq=220, 37 | vibrato_freq=0, 38 | secondary_vibrato_freq=0, 39 | max_pitch_dev=0, 40 | **params 41 | ).shape == (10,) 42 | assert note_with_vibrato(vibrato_freq=0, max_pitch_dev=0, **params).shape == (10,) 43 | assert note_with_two_vibratos( 44 | vibrato_freq=0, 45 | secondary_vibrato_freq=0, 46 | nu1=0, 47 | nu2=0, 48 | **params 49 | ).shape == (10,) 50 | assert tremolo(number_of_samples=10, tremolo_freq=0, max_db_dev=0, sample_rate=100).shape == (10,) 51 | assert tremolos( 52 | number_of_samples=[[5, 5]], 53 | tremolo_freqs=[[0, 0]], 54 | max_db_devs=[[0, 0]], 55 | sample_rate=100, 56 | ).shape == (10,) 57 | 58 | -------------------------------------------------------------------------------- /tests/test_synths.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from pathlib import Path 4 | import pytest 5 | import warnings 6 | 7 | HERE = Path(__file__).resolve().parents[1] 8 | 
sys.path.insert(0, str(HERE)) 9 | 10 | import music 11 | 12 | 13 | def test_note_and_phase_consistency(): 14 | dur = 0.01 15 | n = music.note(freq=440, duration=dur) 16 | n_phase = music.note_with_phase(freq=440, duration=dur, phase=0) 17 | assert len(n) == int(dur * 44100) 18 | assert np.allclose(n, n_phase) 19 | 20 | 21 | def test_note_with_fm_output_shape(): 22 | dur = 0.01 23 | n_fm = music.note_with_fm(freq=440, duration=dur, fm=0, max_fm_deviation=0) 24 | assert len(n_fm) == int(dur * 44100) 25 | assert n_fm.max() <= 1 and n_fm.min() >= -1 26 | 27 | 28 | def test_glissando_and_vibrato_lengths(): 29 | dur = 0.01 30 | g = music.note_with_glissando(start_freq=330, end_freq=330, duration=dur) 31 | assert len(g) == int(dur * 44100) 32 | 33 | g2 = music.note_with_glissando_vibrato( 34 | start_freq=220, end_freq=220, duration=dur, max_pitch_dev=0 35 | ) 36 | assert len(g2) == int(dur * 44100) 37 | 38 | 39 | def test_noise_and_silence_generation(): 40 | sil = music.silence(duration=0.005) 41 | assert np.allclose(sil, np.zeros_like(sil)) 42 | 43 | white = music.noise('white', duration=0.005) 44 | assert len(white) == int(0.005 * 44100) 45 | assert white.max() <= 1 and white.min() >= -1 46 | 47 | gauss = music.gaussian_noise(duration=1) 48 | assert len(gauss) == 44100 49 | assert gauss.max() <= 1 and gauss.min() >= -1 50 | 51 | 52 | def test_noise_no_warnings(): 53 | with warnings.catch_warnings(): 54 | warnings.simplefilter("error") 55 | music.noise('white', duration=0.005) 56 | 57 | 58 | def test_note_with_doppler_stereo_shape(): 59 | data = music.note_with_doppler(number_of_samples=100, stereo=True) 60 | assert data.shape[0] == 2 61 | assert data.shape[1] >= 100 62 | -------------------------------------------------------------------------------- /music/structures/peals/base.py: -------------------------------------------------------------------------------- 1 | class GenericPeal: 2 | """Represents a generic peal. 3 | 4 | Attributes: 5 | nelements (int): The number of elements in the domain. 6 | peals (dict): A dictionary containing the peals and their 7 | corresponding actions. 8 | acted_peals (dict): A dictionary containing the acted peals and their 9 | results. 10 | domain (list): The domain on which the peals are acted. 11 | 12 | Methods: 13 | - act: Acts a specific peal on the specified domain. 14 | - act_all: Acts all peals on the specified domain. 15 | """ 16 | 17 | def __init__(self): 18 | """Initializes a GenericPeal object.""" 19 | self.nelements = None 20 | self.peals = None 21 | self.acted_peals = None 22 | self.domain = None 23 | 24 | def act(self, peal, domain=None): 25 | """Acts a specific peal on the specified domain. 26 | 27 | Parameters: 28 | peal (str): The name of the peal to act. 29 | domain (list, optional): The domain on which to act the peal. 30 | Defaults to None. 31 | 32 | Returns: 33 | list: The result of acting the peal on the specified domain. 34 | """ 35 | if domain is None: 36 | domain = list(range(self.nelements)) 37 | return [i(domain) for i in self.peals[peal]] 38 | 39 | def act_all(self, domain=None): 40 | """Acts all peals on the specified domain. 41 | 42 | Parameters: 43 | domain (list, optional): The domain on which to act the peals. 44 | Defaults to None. 
45 | """ 46 | if domain is None: 47 | domain = list(range(self.nelements)) 48 | acted_peals = {} 49 | for peal in self.peals: 50 | acted_peals[peal+"_acted"] = [i(domain) for i in self.peals[peal]] 51 | self.domain = domain 52 | self.acted_peals = acted_peals 53 | -------------------------------------------------------------------------------- /examples/geometric_music.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import music 3 | 4 | # 1) start a ѕynth 5 | being = music.legacy.Being() 6 | 7 | # 2) set its parameters using sequences to be iterated through 8 | being.d_ = [1/2, 1/4, 1/4] # durations in seconds 9 | being.fv_ = [0, 1, 5, 15, 150, 1500, 15000] # vibrato frequency 10 | being.nu_ = [5] # vibrato depth in semitones (maximum deviation of pitch) 11 | being.f_ = [220, 330] # frequencies for the notes 12 | 13 | s1 = being.render(30) 14 | being.f_ += [440] 15 | being.fv_ = [1, 2, 3, 4, 5] 16 | s2 = being.render(30) 17 | s3 = music.utils.horizontal_stack(s1, s2, s1 + s2, (s1, s2), s1*s2[::-1], 18 | s1[::7] + s2[::7]) 19 | 20 | # X) Tweak with special sets of permutations derived from change ringing 21 | # (campanology) or from finite group theory (algebra): 22 | nel = 4 23 | pe4 = music.structures.PlainChanges(nel) 24 | being.perms = pe4.peal_direct 25 | being.domain = [220 * 2 ** (i / 12) for i in (0, 3, 6, 9)] 26 | being.curseq = 'f_' 27 | being.f_ = [] 28 | nnotes = len(being.perms)*nel # len(being.perms) == factorial(nel) 29 | being.stay(nnotes) 30 | being.nu_ = [0] 31 | being.d_ += [1/2] 32 | s4 = being.render(nnotes) 33 | 34 | b2 = music.legacy.Being() 35 | b2.perms = pe4.peal_direct 36 | b2.domain = being.domain[::-1] 37 | b2.curseq = 'f_' 38 | b2.f_ = [] 39 | nnotes = len(being.perms)*nel # len(being.perms) == factorial(nel) 40 | b2.stay(nnotes) 41 | b2.nu_ = [2, 5, 10, 30, 37] 42 | b2.fv_ = [1, 3, 6, 15, 100, 1000, 10000] 43 | b2.d_ = [1, 1/6, 1/6, 1/6] 44 | s42 = b2.render(nnotes) 45 | 46 | i4 = music.structures.permutations.InterestingPermutations(4) 47 | b2.perms = i4.rotations 48 | b2.curseq = 'f_' 49 | b2.f_ = [] 50 | b2.stay(nnotes) 51 | s43 = b2.render(nnotes) 52 | 53 | s43_ = music.core.filters.fade(sonic_vector=s43, duration=5, method='lin') 54 | 55 | diff = s4.shape[0] - s42.shape[0] 56 | s42_ = music.utils.horizontal_stack(s42, np.zeros(diff)) 57 | s_ = music.utils.horizontal_stack(s3, (s42_, s4), s43_) 58 | 59 | music.core.io.write_wav_stereo(s_, 'geometric_music.wav') 60 | -------------------------------------------------------------------------------- /music/legacy/IteratorSynth.py: -------------------------------------------------------------------------------- 1 | """Synthesizer variant that iterates over configurable parameters.""" 2 | 3 | from .CanonicalSynth import CanonicalSynth 4 | 5 | 6 | class IteratorSynth(CanonicalSynth): 7 | """ 8 | A synthesizer that iterates through arbitrary lists of variables. 9 | 10 | Inherits from CanonicalSynth. 11 | 12 | Attributes: 13 | No additional attributes. 
14 | 15 | Example: 16 | >>> isynth = M.IteratorSynth() 17 | >>> isynth.fundamental_frequency_sequence = [220, 400, 100, 500] 18 | >>> isynth.duration_sequence = [2, 1, 1.5] 19 | >>> isynth.vibrato_frequency_sequence = [3, 6.5, 10] 20 | >>> sounds = [] 21 | >>> for i in range(300): 22 | sounds += [isynth.renderIterate(tremolo_frequency=.2*i)] 23 | >>> import music.core.io 24 | >>> music.core.io.write_wav_mono(M.H(*sounds),"./example.wav") 25 | """ 26 | 27 | def renderIterate(self, **statevars): 28 | """ 29 | Renders a sound iteration with the given state variables. 30 | 31 | Parameters: 32 | **statevars: Arbitrary keyword arguments for state variables. 33 | 34 | Returns: 35 | list: A list representing the rendered sound. 36 | """ 37 | self.absorbState(**statevars) 38 | self.iterateElements() 39 | return self.render() 40 | 41 | def iterateElements(self): 42 | """ 43 | Iterates through the sequences of state variables. 44 | """ 45 | sequences = [var for var in dir(self) if var.endswith("_sequence")] 46 | state_vars = [i[:-9] for i in sequences] 47 | positions = [i + "_position" for i in sequences] 48 | for sequence, state_var, position in zip(sequences, state_vars, 49 | positions): 50 | if position not in dir(self): 51 | self.__dict__[position] = 0 52 | self.__dict__[state_var] = \ 53 | self.__dict__[sequence][self.__dict__[position]] 54 | self.__dict__[position] += 1 55 | self.__dict__[position] %= len(self.__dict__[sequence]) 56 | -------------------------------------------------------------------------------- /tests/test_additional.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | from pathlib import Path 3 | import numpy as np 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | 7 | 8 | def load_module(name, relative_path): 9 | path = HERE / relative_path 10 | spec = importlib.util.spec_from_file_location(name, path) 11 | module = importlib.util.module_from_spec(spec) 12 | spec.loader.exec_module(module) 13 | return module 14 | 15 | utils = load_module('utils', 'music/utils.py') 16 | permutations = load_module('permutations', 'music/structures/permutations.py') 17 | 18 | 19 | def test_midi_interval_and_pitch_to_freq(): 20 | assert np.isclose(utils.midi_to_hz_interval(12), 2.0) 21 | assert np.isclose(utils.midi_to_hz_interval(-12), 0.5) 22 | 23 | freqs = utils.pitch_to_freq(start_freq=220, semitones=[0, 12, 7]) 24 | expected = [220 * 2 ** (i / 12) for i in [0, 12, 7]] 25 | assert np.allclose(freqs, expected) 26 | 27 | 28 | def test_rhythm_to_durations_equivalence(): 29 | durations = [4, 2, 2] 30 | result_time = utils.rhythm_to_durations(durations=durations, duration=0.25) 31 | freqs = [4, 8, 8] 32 | result_freq = utils.rhythm_to_durations(freqs=freqs, duration=4) 33 | assert np.allclose(result_time, result_freq) 34 | 35 | nested = utils.rhythm_to_durations(durations=[4, [2, 1, 1], 2], duration=0.5) 36 | assert np.allclose(nested, [2.0, 0.5, 0.5, 1.0]) 37 | 38 | 39 | def test_mix_with_offset_positive_and_negative(): 40 | s1 = np.array([1, 1, 1, 1]) 41 | s2 = np.array([1, 2]) 42 | mixed_pos = utils.mix_with_offset(s1, s2, number_of_samples=2) 43 | assert np.allclose(mixed_pos, [1, 1, 2, 3]) 44 | 45 | mixed_neg = utils.mix_with_offset(s1, s2, number_of_samples=-1) 46 | assert np.allclose(mixed_neg, [1, 1, 1, 2, 2]) 47 | 48 | 49 | def test_permutation_helpers(): 50 | from sympy.combinatorics import Permutation 51 | 52 | swap = Permutation(0, 3, size=4) 53 | assert permutations.dist(swap) == 1 54 | 55 | perm = Permutation([2, 0, 
1]) 56 | transposed = permutations.transpose_permutation(perm, 1) 57 | assert transposed == Permutation(1, 2, 3) 58 | assert permutations.transpose_permutation(perm, 0) == perm 59 | -------------------------------------------------------------------------------- /music/core/filters/stretches.py: -------------------------------------------------------------------------------- 1 | """Time-stretching utilities for manipulating audio segments.""" 2 | 3 | import numpy as np 4 | from music.utils import horizontal_stack 5 | 6 | 7 | def stretches(x, durations=(1, 4, 8, 12), sample_rate=44100): 8 | """ 9 | Makes a sequence of squeezes of the fragment in x. 10 | 11 | Parameters 12 | ---------- 13 | x : array_like 14 | The samples made to repeat as original or squeezed. Assumed to be in 15 | the form (channels, samples), i.e. x[1][120] is the 120th sample of 16 | the second channel. 17 | durations : list of numbers 18 | Durations in seconds for each repeat of x. 19 | 20 | Examples 21 | -------- 22 | >>> asound = horizontal_stack(*[note_with_vibrato(freq=i, vibrato_freq=j) 23 | ... for i, j in zip([220,440,330,440,330], 24 | ... [.5,15,6,5,30])]) 25 | >>> s = stretches(asound) 26 | >>> s = stretches(asound, 27 | ... durations=[.2, .3] * 10 + [.1, .2, .3, .4] * 8 + 28 | ... [.5, 1.5, .5, 1., 5., .5, .25, .25, .5, 1., .5] * 2) 29 | >>> write_wav_mono(durations, 'stretches.wav') 30 | 31 | Notes 32 | ----- 33 | This function is useful to render musical sequences given any material. 34 | PS: not clear if this function is already useful. 35 | 36 | """ 37 | x = np.array(x) 38 | 39 | s_ = durations * sample_rate 40 | obj = object() 41 | obj.foo = s_ 42 | if len(x.shape) == 1: 43 | length = x.shape[0] 44 | stereo = False 45 | else: 46 | length = x.shape[1] 47 | stereo = True 48 | ns = length / sample_rate 49 | ns_ = [ns / i for i in durations] 50 | obj.bar = ns_ 51 | # x[::ns] (mono) or x[:, ::ns] stereo is the sound in one second 52 | # for any duration s[i], use ns_ = ns//s[i] 53 | # x[np.arange(0, len(x), ns_[i])] 54 | sound = [] 55 | for ss in durations: 56 | if ns/ss >= 1: 57 | indexes = np.arange(0, length, ns / ss).round().astype(np.int64) 58 | else: 59 | indexes = np.arange(0, length - 1, ns / ss).round().astype( 60 | np.int64) 61 | if stereo: 62 | segment = x[:, indexes] 63 | else: 64 | segment = x[indexes] 65 | sound.append(segment) 66 | sound_ = horizontal_stack(*sound) 67 | return sound_ 68 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = 'music' 3 | version = '1.0.1' 4 | authors = [ 5 | { name = 'Renato Fabbri', email = 'renato.fabbri@gmail.com' }, 6 | { name = 'Jacopo Donati', email = 'jacopo.donati@gmail.com' } 7 | ] 8 | description = 'Extreme-fidelity synthesis of musical elements, based on the MASS framework' 9 | license = { file = "LICENSE" } 10 | readme = 'README.md' 11 | requires-python = '>=3.0' 12 | dependencies = [ 13 | 'colorama >= 0.4.6', 14 | 'matplotlib >= 3.7.1', 15 | 'numpy >= 1.26.4', 16 | 'scipy >= 1.12.0', 17 | 'sympy >= 1.12', 18 | 'termcolor >= 2.4.0', 19 | ] 20 | optional-dependencies = { dev = ['pytest >= 8.2', 'mypy >= 1.8'] } 21 | classifiers = [ 22 | 'Programming Language :: Python :: 3', 23 | 'License :: OSI Approved :: MIT License', 24 | 'Operating System :: OS Independent', 25 | 'Development Status :: 5 - Production/Stable', 26 | 'Intended Audience :: Science/Research', 27 | 'Intended Audience :: Healthcare 
Industry', 28 | 'Intended Audience :: Telecommunications Industry', 29 | 'Intended Audience :: Developers', 30 | 'Intended Audience :: Education', 31 | 'Intended Audience :: Religion', 32 | 'Topic :: Scientific/Engineering :: Physics', 33 | 'Topic :: Scientific/Engineering :: Visualization', 34 | 'Topic :: Scientific/Engineering :: Information Analysis', 35 | 'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis', 36 | 'Topic :: Multimedia :: Sound/Audio :: Editors', 37 | 'Topic :: Multimedia :: Sound/Audio :: Mixers', 38 | 'Topic :: Multimedia :: Sound/Audio :: Speech', 39 | 'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis', 40 | 'Topic :: Artistic Software', 41 | ] 42 | keywords = [ 43 | 'acoustics', 44 | 'AM', 45 | 'art', 46 | 'audio', 47 | 'campanology', 48 | 'change ringing', 49 | 'filter', 50 | 'FM', 51 | 'HRTF', 52 | 'LUT', 53 | 'MASS', 54 | 'multimedia', 55 | 'music', 56 | 'noise', 57 | 'PCM', 58 | 'permutation', 59 | 'physics', 60 | 'psychophysics', 61 | 'signal processing', 62 | 'sing', 63 | 'sound', 64 | 'spatialization', 65 | 'speech', 66 | 'synth' 67 | ] 68 | 69 | [project.urls] 70 | Homepage = 'https://github.com/ttm/music' 71 | Issues = 'https://github.com/ttm/music/issues' 72 | 73 | [build-system] 74 | requires = ['setuptools>=61.0'] 75 | build-backend = 'setuptools.build_meta' 76 | 77 | [tool.mypy] 78 | python_version = '3.11' 79 | files = ['music'] 80 | ignore_missing_imports = true 81 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | from pathlib import Path 3 | import numpy as np 4 | import pytest 5 | import warnings 6 | 7 | HERE = Path(__file__).resolve().parents[1] 8 | 9 | 10 | def load_module(name, relative_path): 11 | path = HERE / relative_path 12 | spec = importlib.util.spec_from_file_location(name, path) 13 | module = importlib.util.module_from_spec(spec) 14 | spec.loader.exec_module(module) 15 | return module 16 | 17 | utils = load_module('utils', 'music/utils.py') 18 | functions = load_module('functions', 'music/core/functions.py') 19 | 20 | def test_db_amp_conversion(): 21 | values = np.array([-12, -6, 0, 6, 12]) 22 | amps = utils.db_to_amp(values) 23 | back = utils.amp_to_db(amps) 24 | assert np.allclose(back, values) 25 | 26 | 27 | def test_hz_midi_conversion(): 28 | freqs = np.array([220.0, 440.0, 880.0]) 29 | midis = utils.hz_to_midi(freqs) 30 | back = utils.midi_to_hz(midis) 31 | assert np.allclose(back, freqs) 32 | 33 | 34 | def test_horizontal_stack_and_convert_to_stereo(): 35 | m1 = np.arange(4) 36 | m2 = np.arange(4) + 4 37 | stereo = np.vstack((np.arange(4), np.arange(4) + 10)) 38 | stacked = utils.horizontal_stack(m1, stereo, m2) 39 | assert stacked.shape == (2, 12) 40 | conv = utils.convert_to_stereo(m1) 41 | assert conv.shape == (2, 4) 42 | assert np.allclose(conv[0], m1) 43 | multi = np.vstack((m1, m1 + 10, m1 + 20)) 44 | conv_multi = utils.convert_to_stereo(multi) 45 | expected = np.vstack((multi[0] + multi[2], multi[1] + multi[2])) 46 | assert np.allclose(conv_multi, expected) 47 | 48 | 49 | def test_mix_and_normalize(): 50 | a = np.ones(5) 51 | b = np.arange(3) 52 | mixed = utils.mix(a, b) 53 | expected = a.copy() 54 | expected[:3] += b 55 | assert np.allclose(mixed, expected) 56 | norm = functions.normalize_mono(mixed) 57 | assert np.max(norm) <= 1 and np.min(norm) >= -1 58 | 59 | 60 | def test_mix2_basic(): 61 | a = np.array([1, 1, 1]) 62 | b = np.array([1, 2]) 63 | mixed 
= utils.mix2([a, b]) 64 | assert np.allclose(mixed, np.array([2, 3, 1])) 65 | 66 | 67 | def test_mix2_offset_and_end(): 68 | a = np.array([1, 1]) 69 | b = np.array([1, 1, 1]) 70 | out = utils.mix2([a, b], end=True) 71 | assert np.allclose(out, np.array([1, 2, 2])) 72 | 73 | out_offset = utils.mix2([a, b], offset=[0, 1], sample_rate=1) 74 | assert np.allclose(out_offset, np.array([1, 2, 1, 1])) 75 | 76 | 77 | def test_hz_to_midi_no_warnings(): 78 | with warnings.catch_warnings(): 79 | warnings.simplefilter("error") 80 | utils.hz_to_midi(np.array([0.0, 440.0])) 81 | -------------------------------------------------------------------------------- /music/core/__init__.py: -------------------------------------------------------------------------------- 1 | """Core audio signal processing module.""" 2 | 3 | from ..utils import ( 4 | amp_to_db, 5 | convert_to_stereo, 6 | db_to_amp, 7 | horizontal_stack, 8 | hz_to_midi, 9 | midi_to_hz_interval, 10 | midi_to_hz, 11 | mix_stereo, 12 | mix_with_offset_, 13 | mix_with_offset, 14 | mix, 15 | mix2, 16 | pan_transitions, 17 | pitch_to_freq, 18 | profile, 19 | resolve_stereo, 20 | rhythm_to_durations 21 | ) 22 | from .filters import ( 23 | adsr, 24 | adsr_stereo, 25 | adsr_vibrato, 26 | cross_fade, 27 | fade, 28 | fir, 29 | iir, 30 | localize, 31 | localize2, 32 | localize_linear, 33 | loud, 34 | louds, 35 | reverb, 36 | stretches 37 | ) 38 | from .functions import normalize_mono, normalize_stereo 39 | from .io import read_wav, write_wav_mono, write_wav_stereo, play_audio 40 | from .synths import ( 41 | am, 42 | gaussian_noise, 43 | note, 44 | note_with_doppler, 45 | note_with_fm, 46 | note_with_glissando, 47 | note_with_glissando_vibrato, 48 | note_with_phase, 49 | note_with_vibrato, 50 | note_with_two_vibratos, 51 | note_with_two_vibratos_glissando, 52 | note_with_vibratos_glissandos, 53 | note_with_vibrato_seq_localization, 54 | noise, 55 | silence, 56 | tremolo, 57 | tremolos, 58 | trill 59 | ) 60 | 61 | __all__ = [ 62 | 'adsr_stereo', 63 | 'adsr_vibrato', 64 | 'adsr', 65 | 'am', 66 | 'amp_to_db', 67 | 'convert_to_stereo', 68 | 'cross_fade', 69 | 'db_to_amp', 70 | 'fade', 71 | 'fir', 72 | 'gaussian_noise', 73 | 'horizontal_stack', 74 | 'hz_to_midi', 75 | 'iir', 76 | 'localize_linear', 77 | 'localize', 78 | 'localize2', 79 | 'loud', 80 | 'louds', 81 | 'midi_to_hz_interval', 82 | 'midi_to_hz', 83 | 'mix_stereo', 84 | 'mix_with_offset_', 85 | 'mix_with_offset', 86 | 'mix', 87 | 'mix2', 88 | 'noise', 89 | 'normalize_mono', 90 | 'normalize_stereo', 91 | 'note_with_doppler', 92 | 'note_with_fm', 93 | 'note_with_glissando_vibrato', 94 | 'note_with_glissando', 95 | 'note_with_phase', 96 | 'note_with_two_vibratos_glissando', 97 | 'note_with_two_vibratos', 98 | 'note_with_vibrato_seq_localization', 99 | 'note_with_vibrato', 100 | 'note_with_vibratos_glissandos', 101 | 'note', 102 | 'pan_transitions', 103 | 'pitch_to_freq', 104 | 'profile', 105 | 'read_wav', 106 | 'resolve_stereo', 107 | 'reverb', 108 | 'rhythm_to_durations', 109 | 'silence', 110 | 'stretches', 111 | 'tremolo', 112 | 'tremolos', 113 | 'trill', 114 | 'write_wav_mono', 115 | 'write_wav_stereo', 116 | 'play_audio' 117 | ] 118 | -------------------------------------------------------------------------------- /music/structures/peals/peals.py: -------------------------------------------------------------------------------- 1 | """ 2 | Provides functions for generating and representing peals using permutations. 
3 | """ 4 | 5 | from sympy.combinatorics import Permutation 6 | from termcolor import colored 7 | from colorama import init 8 | from ..permutations import InterestingPermutations 9 | 10 | init() 11 | 12 | 13 | def print_peal(peal, hunts=(0, 1)): 14 | """ 15 | Prints a peal with colored numbers. Hunts have also colored background. 16 | 17 | Parameters: 18 | peal (list): The peal to print. 19 | hunts (list, optional): The indices of hunted elements. Defaults to 20 | [0, 1]. 21 | """ 22 | colors = 'yellow', 'magenta', 'green', 'red', 'blue', 'white', 'grey', \ 23 | 'cyan' 24 | hcolors = 'on_white', 'on_blue', 'on_red', 'on_grey', 'on_yellow', \ 25 | 'on_magenta', 'on_green', 'on_cyan' 26 | final_string = '' 27 | for sequence in peal: 28 | final_string += ''.join( 29 | colored(i, colors[i], hcolors[-(i + 1)]) if i in hunts else 30 | colored(i, colors[i], "on_white", ["bold"]) for i in sequence) + \ 31 | '\n' 32 | print(final_string) 33 | 34 | 35 | class Peals(InterestingPermutations): 36 | """ 37 | Uses permutations to make peals and represents peals as permutations. 38 | 39 | Notes: 40 | Core reference: 41 | - http://www.gutenberg.org/files/18567/18567-h/18567-h.htm 42 | 43 | Also check peal rules, such as conditions for trueness. 44 | - Wikipedia seemed ok last time. 45 | """ 46 | 47 | def __init__(self): 48 | """ 49 | Initializes a Peals object. 50 | """ 51 | InterestingPermutations.__init__(self) 52 | self.peals = [] 53 | # Base peals can be created here when implementations become available 54 | # self.transpositions_peal(self.peals["rotation_peal"][1]) 55 | 56 | def transpositions_peal(self, permutation, peal_name="transposition_peal"): 57 | """Generates a peal from transpositions of a permutation. 58 | 59 | Parameters: 60 | permutation (Permutation): The permutation to generate 61 | transpositions from. 62 | peal_name (str, optional): The name of the peal. Defaults to 63 | "transposition_peal". 64 | """ 65 | self.peals[peal_name] = [Permutation(i) 66 | for i in permutation.transpositions()] 67 | 68 | def twenty_all_over(self): 69 | """Placeholder for a 20 all over peal implementation.""" 70 | raise NotImplementedError("twenty_all_over is not yet implemented") 71 | 72 | def an_eight_and_forty(self): 73 | """Placeholder for an eight and forty peal implementation.""" 74 | raise NotImplementedError("an_eight_and_forty is not yet implemented") 75 | -------------------------------------------------------------------------------- /music/core/filters/reverb.py: -------------------------------------------------------------------------------- 1 | """Simple reverberation filters and impulse response generation.""" 2 | 3 | import numpy as np 4 | from ..synths import noise 5 | 6 | 7 | def reverb(duration=1.9, first_phase_duration=0.15, decay=-50, 8 | noise_type="brown", sonic_vector=0, sample_rate=44100): 9 | """ 10 | Apply an artificial reverberation or return the impulse response. 11 | 12 | Parameters 13 | ---------- 14 | duration : scalar 15 | The total duration of the reverberation in seconds. 16 | first_phase_duration : scalar 17 | The duration of the first phase of the reverberation in seconds. 18 | decay : scalar 19 | The total decay of the last incidence in decibels. 20 | noise_type : string or scalar 21 | A string or scalar specifying the noise. Passed to 22 | noises(ntype=scalar). 23 | sonic_vector : array_like 24 | An optional one dimensional array for the reverberation to be applied. 25 | sample_rate : scalar 26 | The sampling frequency. 
27 | 28 | Returns 29 | ------- 30 | result : numpy.ndarray 31 | An array with the impulse response of the reverberation. If 32 | sonic_vector is specified, the reverberation applied to sonic_vector. 33 | 34 | Notes 35 | ----- 36 | This is a simple artificial reverberation with a progressive loudness 37 | decay of the reincidences of the sound and with two periods: the first 38 | consists of scattered reincidences, the second period reincidences is 39 | modeled by a noise. 40 | 41 | Comparing with the description in [1], the frequency bands are ignored. 42 | 43 | One might want to run this function twice to obtain a stereo reverberation. 44 | 45 | Cite the following article whenever you use this function. 46 | 47 | References 48 | ---------- 49 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 50 | representation of sound." arXiv preprint arXiv:abs/1412.6853 (2017) 51 | 52 | """ 53 | lambda_r = int(duration * sample_rate) 54 | lambda1 = int(first_phase_duration * sample_rate) 55 | # Sound reincidence probability in the first period: 56 | ii = np.arange(lambda_r) 57 | p = (ii[:lambda1] / lambda1) ** 2. 58 | # incidences: 59 | r1_ = np.random.random(lambda1) < p 60 | a = 10. ** ((decay / 20) * (ii / (lambda_r - 1))) 61 | # Eq. 76 First period of reverberation: 62 | r1 = r1_ * a[:lambda1] # first incidences 63 | 64 | # Eq. 77 Second period of reverberation: 65 | noise_ = noise(noise_type, max_freq=sample_rate / 2, 66 | number_of_samples=lambda_r - lambda1) 67 | r2 = noise_ * a[lambda1:lambda_r] 68 | 69 | # Eq. 78 Impulse response of the reverberation 70 | result = np.hstack((r1, r2)) 71 | result[0] = 1. 72 | if type(sonic_vector) in (np.ndarray, list): 73 | return np.convolve(sonic_vector, result) 74 | else: 75 | return result 76 | -------------------------------------------------------------------------------- /music/tables.py: -------------------------------------------------------------------------------- 1 | """Provides primary tables for waveform lookup. 2 | 3 | This module contains the `PrimaryTables` class, which allows the creation of 4 | sine, triangle, square, and saw wave periods with a given number of samples. 5 | It also provides a method to visualize these waveform tables. 6 | 7 | Example: 8 | To create and visualize waveform tables: 9 | 10 | >>> from music import PrimaryTables 11 | >>> PrimaryTables.__module__ # confirm correct package name 12 | 'music.tables' 13 | >>> primary_tables = PrimaryTables() 14 | >>> primary_tables.draw_tables() 15 | 16 | Classes: 17 | - PrimaryTables: Provides primary tables for waveform lookup. 18 | """ 19 | import numpy as np 20 | import pylab as p 21 | 22 | 23 | class PrimaryTables: 24 | """Provides primary tables for waveform lookup. 25 | 26 | This class creates sine, triangle, square, and saw wave periods 27 | with a given number of samples. 28 | 29 | Parameters 30 | ---------- 31 | size : int, optional 32 | The number of samples for each waveform table, by default 2048. 33 | 34 | Attributes 35 | ---------- 36 | sine : ndarray 37 | The sine wave table. 38 | triangle : ndarray 39 | The triangle wave table. 40 | square : ndarray 41 | The square wave table. 42 | saw : ndarray 43 | The sawtooth wave table. 44 | size : int 45 | The number of samples for each waveform table. 46 | 47 | Examples 48 | -------- 49 | >>> primary_tables = PrimaryTables() 50 | >>> primary_tables.draw_tables() # Draw the waveform tables 51 | """ 52 | def __init__(self, size=2048): 53 | """Initialize the PrimaryTables class. 
54 | 55 | Parameters 56 | ---------- 57 | size : int, optional 58 | The number of samples for each waveform table, by default 2048. 59 | """ 60 | self.triangle = None 61 | self.square = None 62 | self.saw = None 63 | self.sine = None 64 | self.size = size 65 | self.make_tables(size) 66 | 67 | def make_tables(self, size): 68 | """Create waveform tables. 69 | 70 | Parameters 71 | ---------- 72 | size : int 73 | The number of samples for each waveform table. 74 | """ 75 | self.sine = np.sin(np.linspace(0, 2 * np.pi, size, endpoint=False)) 76 | self.saw = np.linspace(-1, 1, size) 77 | self.square = np.hstack((np.ones(size // 2) * -1, np.ones(size // 2))) 78 | foo = np.linspace(-1, 1, size // 2, endpoint=False) 79 | self.triangle = np.hstack((foo, foo * -1)) 80 | 81 | def draw_tables(self): 82 | """Draw waveform tables.""" 83 | p.plot(self.sine, "-o") 84 | p.plot(self.saw, "-o") 85 | p.plot(self.square, "-o") 86 | p.plot(self.triangle, "-o") 87 | p.xlim(-self.size * 0.1, self.size * 1.1) 88 | p.ylim(-1.1, 1.1) 89 | p.show() 90 | -------------------------------------------------------------------------------- /music/core/filters/impulse_response.py: -------------------------------------------------------------------------------- 1 | """Finite impulse response and related filters.""" 2 | 3 | import numpy as np 4 | 5 | 6 | def fir(samples, sonic_vector, freq=True, max_freq=True): 7 | """ 8 | Apply a FIR filter to a sonic_array. 9 | 10 | Parameters 11 | ---------- 12 | samples : array_like 13 | A sequence of absolute values for the frequencies (if freq=True) or 14 | samples of an impulse response. 15 | sonic_vector : array_like 16 | An one-dimensional array with the PCM samples of the signal (e.g. 17 | sound) for the FIR filter to be applied to. 18 | freq : boolean 19 | Set to True if samples holds frequency amplitude absolute values or 20 | False if samples is an impulse response. If max_freq=True, the 21 | separations between the frequencies are: fs / (2 * N - 2). 22 | If max_freq=False, the separation between the frequencies are 23 | fs / (2 * N - 1). Where N is the length of the provided samples. 24 | max_freq : boolean 25 | Set to True if the last item in the samples is related to the Nyquist 26 | frequency fs / 2. Ignored if freq=False. 27 | 28 | Notes 29 | ----- 30 | If freq=True, the samples are the absolute values of the frequency 31 | components. The phases are set to zero to maintain the phases of the 32 | components of the original signal. 33 | 34 | """ 35 | if not freq: 36 | return np.convolve(samples, sonic_vector) 37 | if max_freq: 38 | s = np.hstack((samples, samples[1:-1][::-1])) 39 | else: 40 | s = np.hstack((samples, samples[1:][::-1])) 41 | return np.convolve(s, sonic_vector) 42 | 43 | 44 | def iir(sonic_vector, a, b): 45 | """ 46 | Apply an IIR filter to a signal. 47 | 48 | Parameters 49 | ---------- 50 | sonic_vector : array_like 51 | An one-dimensional array representing the signal (potentially a sound) 52 | for the filter to by applied to. 53 | a : iterable of scalars 54 | The feedforward coefficients. 55 | b : iterable of scalars 56 | The feedback filter coefficients. 57 | 58 | Notes 59 | ----- 60 | Check [1] to know more about this function. 61 | 62 | Cite the following article whenever you use this function. 63 | 64 | References 65 | ---------- 66 | .. [1] Fabbri, Renato, et al. "Musical elements in the 67 | discrete-time representation of sound." 
68 | arXiv preprint arXiv:abs/1412.6853 (2017) 69 | 70 | """ 71 | signal = sonic_vector 72 | signal_ = [] 73 | for i in range(len(signal)): 74 | samples_a = signal[i::-1][:len(a)] 75 | a_coeffs = a[:i + 1] 76 | a_contrib = (samples_a * a_coeffs).sum() 77 | 78 | samples_b = signal_[-1:-1 - i:-1][:len(b) - 1] 79 | b_coeffs = b[1:i + 1] 80 | b_contrib = (samples_b * b_coeffs).sum() 81 | t_i = (a_contrib + b_contrib) / b[0] 82 | signal_.append(t_i) 83 | return np.array(signal_) 84 | -------------------------------------------------------------------------------- /music/core/functions.py: -------------------------------------------------------------------------------- 1 | """Core audio processing utilities reused across the package.""" 2 | import numpy as np 3 | 4 | 5 | def normalize_mono(sonic_vector, remove_bias=True): 6 | """ 7 | Normalize a mono sonic vector. 8 | 9 | The final array will have values only between -1 and 1. 10 | 11 | Parameters 12 | ---------- 13 | sonic_vector : array_like 14 | A (nsamples,) shaped array. 15 | remove_bias : boolean 16 | Whether to remove or not the bias (or offset) 17 | 18 | Returns 19 | ------- 20 | s : ndarray 21 | A numpy array with values between -1 and 1. 22 | remove_bias : boolean 23 | Whether to remove or not the bias (or offset) 24 | 25 | """ 26 | t = np.array(sonic_vector) 27 | if np.all(t == 0): 28 | return t 29 | else: 30 | if remove_bias: 31 | s = t - t.mean() 32 | fact = max(s.max(), -s.min()) 33 | s = s / fact 34 | else: 35 | s = ((t - t.min()) / (t.max() - t.min())) * 2. - 1. 36 | return s 37 | 38 | 39 | def normalize_stereo(sonic_vector, remove_bias=True, normalize_sep=False): 40 | """ 41 | Normalize a stereo sonic vector. 42 | 43 | The final array will have values only between -1 and 1. 44 | 45 | Parameters 46 | ---------- 47 | sonic_vector : array_like 48 | A (2, nsamples) shaped array. 49 | remove_bias : boolean 50 | Whether to remove or not the bias (or offset) 51 | normalize_sep : boolean 52 | Set to True if each channel should be normalized separately. 53 | If False (default), the arrays will be rescaled in the same proportion 54 | (preserves loudness proportion). 55 | 56 | Returns 57 | ------- 58 | sv_normalized : ndarray 59 | A numpy array with values between -1 and 1. 
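Examples
--------
A minimal sketch, normalizing an illustrative two-channel ramp; the input
values are arbitrary and only demonstrate the default joint rescaling:

>>> import numpy as np
>>> stereo = np.vstack((np.linspace(0, 0.5, 10), np.linspace(0, 1, 10)))
>>> normalized = normalize_stereo(stereo)
>>> float(normalized.max()), float(normalized.min())
(1.0, -1.0)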
60 | 61 | """ 62 | sv_copy = np.array(sonic_vector) 63 | if np.all(sv_copy == 0): 64 | return sv_copy 65 | 66 | if remove_bias: 67 | sv_normalized = sv_copy 68 | sv_normalized[0] = sv_normalized[0] - sv_normalized[0].mean() 69 | sv_normalized[1] = sv_normalized[1] - sv_normalized[1].mean() 70 | if normalize_sep: 71 | fact = max(sv_normalized[0].max(), -sv_normalized[0].min()) 72 | sv_normalized[0] = sv_normalized[0] / fact 73 | fact = max(sv_normalized[1].max(), -sv_normalized[1].min()) 74 | sv_normalized[1] = sv_normalized[1] / fact 75 | else: 76 | fact = max(sv_normalized.max(), -sv_normalized.min()) 77 | sv_normalized = sv_normalized / fact 78 | else: 79 | amplitude_ch_1 = sv_copy[0].max() - sv_copy[0].min() 80 | amplitude_ch_2 = sv_copy[1].max() - sv_copy[1].min() 81 | if normalize_sep: 82 | sv_copy[0] = (sv_copy[0] - sv_copy[0].min()) / amplitude_ch_1 83 | sv_copy[1] = (sv_copy[1] - sv_copy[1].min()) / amplitude_ch_2 84 | sv_normalized = sv_copy * 2 - 1 85 | else: 86 | amplitude = max(amplitude_ch_1, amplitude_ch_2) 87 | sv_copy = (sv_copy - sv_copy.min()) / amplitude 88 | sv_normalized = sv_copy * 2 - 1 89 | return sv_normalized 90 | -------------------------------------------------------------------------------- /tests/test_filters.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from pathlib import Path 4 | 5 | HERE = Path(__file__).resolve().parents[1] 6 | sys.path.insert(0, str(HERE)) 7 | 8 | from music.core.filters import ( 9 | adsr, 10 | fade, 11 | cross_fade, 12 | reverb, 13 | loud, 14 | louds, 15 | ) 16 | from music.core.filters.localization import localize 17 | 18 | 19 | def test_adsr_envelope_basic(): 20 | env = adsr( 21 | envelope_duration=0.1, 22 | attack_duration=10, 23 | decay_duration=20, 24 | sustain_level=-6, 25 | release_duration=10, 26 | transition="exp", 27 | sample_rate=1000, 28 | ) 29 | sustain_amp = 10 ** (-6 / 20) 30 | assert len(env) == 100 31 | assert env[0] < 1e-3 32 | assert np.isclose(env[9], 1.0, atol=1e-6) 33 | assert np.allclose(env[30:90], sustain_amp) 34 | assert env[-1] < 1e-4 35 | 36 | 37 | def test_fade_and_cross_fade(): 38 | fade_out = fade(number_of_samples=5, fade_out=True, method="linear") 39 | fade_in = fade(number_of_samples=5, fade_out=False, method="linear") 40 | assert np.allclose(fade_out, np.linspace(1, 0, 5)) 41 | assert np.allclose(fade_in, np.linspace(0, 1, 5)) 42 | 43 | s1 = np.ones(441) 44 | s2 = np.ones(441) * 2 45 | mixed = cross_fade(s1.copy(), s2.copy(), duration=5, sample_rate=44100) 46 | assert len(mixed) == 661 47 | assert mixed[0] == 1.0 48 | assert np.isclose(mixed[-1], 2.0, atol=1e-6) 49 | 50 | 51 | def test_reverb_minimal_operation(): 52 | np.random.seed(0) 53 | ir = reverb( 54 | duration=0.02, 55 | first_phase_duration=0.01, 56 | decay=-1, 57 | noise_type="white", 58 | sample_rate=100, 59 | ) 60 | assert len(ir) == 2 61 | assert ir[0] == 1.0 62 | 63 | out = reverb( 64 | duration=0.02, 65 | first_phase_duration=0.01, 66 | decay=-1, 67 | noise_type="white", 68 | sonic_vector=np.ones(5), 69 | sample_rate=100, 70 | ) 71 | assert out.shape == (6,) 72 | 73 | def test_localize_basic(): 74 | sv = np.ones(5) 75 | out = localize(sonic_vector=sv, x=0.1, y=0.1, sample_rate=10) 76 | assert out.shape[0] == 2 77 | assert out.shape[1] >= 5 78 | assert not np.allclose(out[0], out[1]) 79 | 80 | 81 | def test_loud_ramp_start_end(): 82 | sr = 100 83 | # Exponential ramp up by 6 dB 84 | env = loud(duration=0.1, trans_dev=6, method="exp", sample_rate=sr) 85 | 
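# For method="exp", trans_dev is a gain in decibels, so the envelope is
# expected to start at amplitude 1.0 and end at 10 ** (6 / 20) ~ 1.995,
# which is what the assertions below check.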
assert len(env) == int(0.1 * sr) 86 | assert np.isclose(env[0], 1.0, atol=1e-6) 87 | assert np.isclose(env[-1], 10 ** (6 / 20), atol=1e-6) 88 | 89 | # Linear ramp down to zero 90 | lin_env = loud(duration=0.05, trans_dev=0, method="linear", sample_rate=sr) 91 | assert lin_env[0] == 1.0 92 | assert np.isclose(lin_env[-1], 0.0, atol=1e-6) 93 | 94 | 95 | def test_louds_concatenation_and_continuity(): 96 | sr = 100 97 | durations = (0.1, 0.2) 98 | devs = (6, -6) 99 | env = louds(durations=durations, trans_devs=devs, alpha=(1, 1), method=("exp", "exp"), sample_rate=sr) 100 | expected_len = int(sum(durations) * sr) 101 | assert len(env) == expected_len 102 | 103 | n1 = int(durations[0] * sr) 104 | assert np.isclose(env[n1 - 1], env[n1], atol=1e-6) 105 | assert np.isclose(env[0], 1.0, atol=1e-6) 106 | mid_amp = 10 ** (devs[0] / 20) 107 | assert np.isclose(env[n1 - 1], mid_amp, atol=1e-6) 108 | assert np.isclose(env[-1], 1.0, atol=1e-6) 109 | 110 | -------------------------------------------------------------------------------- /music/__init__.py: -------------------------------------------------------------------------------- 1 | """Top-level package for basic audio synthesis utilities.""" 2 | 3 | from .utils import ( 4 | amp_to_db, 5 | convert_to_stereo, 6 | db_to_amp, 7 | horizontal_stack, 8 | hz_to_midi, 9 | midi_to_hz_interval, 10 | midi_to_hz, 11 | mix_stereo, 12 | mix_with_offset_, 13 | mix_with_offset, 14 | mix, 15 | mix2, 16 | pan_transitions, 17 | pitch_to_freq, 18 | profile, 19 | resolve_stereo, 20 | rhythm_to_durations 21 | ) 22 | from .core import ( 23 | adsr_stereo, 24 | adsr_vibrato, 25 | adsr, 26 | am, 27 | cross_fade, 28 | fade, 29 | fir, 30 | gaussian_noise, 31 | iir, 32 | localize_linear, 33 | localize, 34 | localize2, 35 | loud, 36 | louds, 37 | noise, 38 | normalize_mono, 39 | normalize_stereo, 40 | note_with_doppler, 41 | note_with_fm, 42 | note_with_glissando_vibrato, 43 | note_with_glissando, 44 | note_with_phase, 45 | note_with_two_vibratos_glissando, 46 | note_with_two_vibratos, 47 | note_with_vibrato_seq_localization, 48 | note_with_vibrato, 49 | note_with_vibratos_glissandos, 50 | note, 51 | read_wav, 52 | reverb, 53 | silence, 54 | stretches, 55 | tremolo, 56 | tremolos, 57 | trill, 58 | write_wav_mono, 59 | write_wav_stereo, 60 | play_audio, 61 | ) 62 | from .tables import PrimaryTables 63 | from .structures import ( 64 | dist, 65 | GenericPeal, 66 | InterestingPermutations, 67 | Peals, 68 | PlainChanges, 69 | print_peal, 70 | transpose_permutation 71 | ) 72 | from .singing import get_engine, make_test_song, setup_engine 73 | from .legacy import Being, CanonicalSynth, IteratorSynth 74 | from .sequencer import Sequencer 75 | 76 | __all__ = [ 77 | 'adsr_stereo', 78 | 'adsr_vibrato', 79 | 'adsr', 80 | 'am', 81 | 'amp_to_db', 82 | 'Being', 83 | 'CanonicalSynth', 84 | 'convert_to_stereo', 85 | 'cross_fade', 86 | 'db_to_amp', 87 | 'dist', 88 | 'fade', 89 | 'fir', 90 | 'gaussian_noise', 91 | 'GenericPeal', 92 | 'get_engine', 93 | 'setup_engine', 94 | 'horizontal_stack', 95 | 'hz_to_midi', 96 | 'iir', 97 | 'InterestingPermutations', 98 | 'IteratorSynth', 99 | 'localize_linear', 100 | 'localize', 101 | 'localize2', 102 | 'loud', 103 | 'louds', 104 | 'make_test_song', 105 | 'midi_to_hz_interval', 106 | 'midi_to_hz', 107 | 'mix_stereo', 108 | 'mix_with_offset_', 109 | 'mix_with_offset', 110 | 'mix', 111 | 'mix2', 112 | 'noise', 113 | 'normalize_mono', 114 | 'normalize_stereo', 115 | 'note_with_doppler', 116 | 'note_with_fm', 117 | 'note_with_glissando_vibrato', 118 | 
'note_with_glissando', 119 | 'note_with_phase', 120 | 'note_with_two_vibratos_glissando', 121 | 'note_with_two_vibratos', 122 | 'note_with_vibrato_seq_localization', 123 | 'note_with_vibrato', 124 | 'note_with_vibratos_glissandos', 125 | 'note', 126 | 'pan_transitions', 127 | 'Peals', 128 | 'pitch_to_freq', 129 | 'PlainChanges', 130 | 'PrimaryTables', 131 | 'print_peal', 132 | 'profile', 133 | 'read_wav', 134 | 'resolve_stereo', 135 | 'reverb', 136 | 'rhythm_to_durations', 137 | 'silence', 138 | 'stretches', 139 | 'transpose_permutation', 140 | 'tremolo', 141 | 'tremolos', 142 | 'trill', 143 | 'write_wav_mono', 144 | 'write_wav_stereo', 145 | 'play_audio', 146 | 'Sequencer' 147 | ] 148 | -------------------------------------------------------------------------------- /music/singing/perform.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """Utilities to synthesize singing from text using eCantorix.""" 3 | 4 | import os 5 | import re 6 | import warnings 7 | import logging 8 | import subprocess 9 | from scipy.io import wavfile 10 | from music.core import normalize_mono 11 | 12 | here = os.path.abspath(os.path.dirname(__file__)) 13 | ECANTORIXDIR = here + '/ecantorix' 14 | ECANTORIXCACHE = ECANTORIXDIR + '/cache' 15 | 16 | 17 | # def sing(text="ba-na-nin-ha pra vo-cê", 18 | def sing(text="Mar-ry had a litt-le lamb", 19 | notes=(4, 2, 0, 2, 4, 4, 4), durs=(1, 1, 1, 1, 1, 1, 2), 20 | M='4/4', L='1/4', Q=120, K='C', reference=60, 21 | lang='en', transpose=-36, effect=None): 22 | # lang='pt', transpose=-36, effect=None): 23 | # write abc file 24 | # write make file 25 | # convert file to midi 26 | # sing it out 27 | # reference -= 24 28 | write_abc(text, notes, durs, M=M, L=L, Q=Q, K=K, reference=reference) 29 | conf_text = '$ESPEAK_VOICE = "{}";\n'.format(lang) 30 | conf_text += '$ESPEAK_TRANSPOSE = {};'.format(transpose) 31 | if effect == 'flint': 32 | conf_text += "\ndo 'extravoices/flite.inc';" 33 | elif effect == 'tremolo': 34 | conf_text += "\ndo 'extravoices/tremolo.inc';" 35 | elif effect == 'melt': 36 | conf_text += "\ndo 'extravoices/melt.inc';" 37 | elif effect: 38 | raise ValueError('effect not understood') 39 | with open(ECANTORIXCACHE + '/achant.conf', 'w') as f: 40 | f.write(conf_text) 41 | # write conf file 42 | try: 43 | subprocess.run( 44 | [ 45 | 'cp', 46 | f'{ECANTORIXDIR}/Makefile', 47 | f'{ECANTORIXCACHE}/Makefile' 48 | ], 49 | check=True, 50 | ) 51 | subprocess.run(['make', '-C', ECANTORIXCACHE], check=True) 52 | except subprocess.CalledProcessError as exc: 53 | raise RuntimeError(f'Failed to build singing cache: {exc}') from exc 54 | wread = wavfile.read(ECANTORIXCACHE + '/achant.wav') 55 | assert wread[0] == 44100 56 | return normalize_mono(wread[1]) 57 | # return wread[1] 58 | 59 | 60 | def write_abc(text, notes, durs, M='4/4', L='1/4', Q=120, K='C', reference=60): 61 | text_ = 'X:1\n' 62 | text_ += 'T:Some chanting for music python package\n' 63 | text_ += 'M:{}\n'.format(M) 64 | text_ += 'L:{}\n'.format(L) 65 | text_ += 'Q:{}\n'.format(Q) 66 | text_ += 'V:1\n' 67 | text_ += 'K:{}\n'.format(K) 68 | notes = translate_to_abc(notes, durs, reference) 69 | text_ += notes + "\nw: " + text 70 | fname = ECANTORIXCACHE + "/achant.abc" 71 | with open(fname, 'w') as f: 72 | f.write(text_) 73 | 74 | 75 | def translate_to_abc(notes, durs, reference): 76 | durs = [str(i).replace('-', '/') for i in durs] 77 | durs = [i if i != '1' else '' for i in durs] 78 | notes = converter.convert(notes, reference) 79 | return 
''.join([i + j for i, j in zip(notes, durs)]) 80 | 81 | 82 | class Notes: 83 | def __init__(self): 84 | self.notes_dict = None 85 | self.make_dict() 86 | 87 | def make_dict(self): 88 | notes = re.findall(r'[\^=]?[a-g]', '=c^c=d^de=f^f=g^g=a^ab') 89 | # notes=re.findall(r'[\^]{0,1}[a-g]{1}','a^abc^cd^def^fg^g') 90 | notes_ = [note.upper() for note in notes] 91 | notes__ = [note + "," for note in notes_] 92 | notes___ = [note + "," for note in notes__] 93 | notes____ = [note + "," for note in notes___] 94 | notes_u = [note + "'" for note in notes] 95 | notes__u = [note + "'" for note in notes_u] 96 | notes___u = [note + "'" for note in notes__u] 97 | notes_all = notes____ + notes___ + notes__ + notes_ + notes + \ 98 | notes_u + notes__u + notes___u 99 | self.notes_dict = dict([(i, j) for i, j in zip(range(12, 97), 100 | notes_all)]) 101 | 102 | def convert(self, notes, reference): 103 | if 'notes_dict' not in dir(self): 104 | self.make_dict() 105 | notes_ = [reference + note for note in notes] 106 | notes__ = [self.notes_dict[note] for note in notes_] 107 | return notes__ 108 | 109 | 110 | converter = Notes() 111 | 112 | if __name__ == '__main__': 113 | narray = sing() 114 | logging.info("finished") 115 | -------------------------------------------------------------------------------- /music/sequencer.py: -------------------------------------------------------------------------------- 1 | """Simple note sequencer built on Music primitives.""" 2 | from __future__ import annotations 3 | 4 | from dataclasses import dataclass, field 5 | from typing import List, Optional, Dict, Any 6 | 7 | import numpy as np 8 | 9 | from .core import synths 10 | from .core.filters import adsr 11 | from .core.filters.localization import localize 12 | from .utils import convert_to_stereo 13 | from .core.io import write_wav_mono, write_wav_stereo 14 | 15 | 16 | @dataclass 17 | class NoteEvent: 18 | """Represents a scheduled note.""" 19 | 20 | freq: float 21 | start: float 22 | duration: float 23 | vibrato_freq: float = 0.0 24 | max_pitch_dev: float = 0.0 25 | adsr_params: Optional[Dict[str, Any]] = None 26 | spatial: Optional[Dict[str, Any]] = None 27 | 28 | 29 | @dataclass 30 | class Sequencer: 31 | """Schedules notes and renders them as audio.""" 32 | 33 | sample_rate: int = 44100 34 | events: List[NoteEvent] = field(default_factory=list) 35 | 36 | def _mix_with_offset(self, base: np.ndarray, new: np.ndarray, start: float) -> np.ndarray: 37 | """Mix two sonic vectors with an offset in seconds.""" 38 | offset = int(round(start * self.sample_rate)) 39 | if base.ndim != new.ndim: 40 | if base.ndim == 1: 41 | base = convert_to_stereo(base) 42 | else: 43 | new = convert_to_stereo(new) 44 | 45 | if base.ndim == 1: 46 | final_len = max(len(base), offset + len(new)) 47 | out = np.zeros(final_len) 48 | out[: len(base)] += base 49 | out[offset : offset + len(new)] += new 50 | else: 51 | final_len = max(base.shape[1], offset + new.shape[1]) 52 | out = np.zeros((2, final_len)) 53 | out[:, : base.shape[1]] += base 54 | out[:, offset : offset + new.shape[1]] += new 55 | return out 56 | 57 | def add_note( 58 | self, 59 | freq: float, 60 | start: float, 61 | duration: float, 62 | vibrato_freq: float = 0.0, 63 | max_pitch_dev: float = 0.0, 64 | adsr_params: Optional[Dict[str, Any]] = None, 65 | spatial: Optional[Dict[str, Any]] = None, 66 | ) -> None: 67 | """Add a note event to the sequencer.""" 68 | self.events.append( 69 | NoteEvent( 70 | freq=freq, 71 | start=start, 72 | duration=duration, 73 | vibrato_freq=vibrato_freq, 74 | 
max_pitch_dev=max_pitch_dev, 75 | adsr_params=adsr_params, 76 | spatial=spatial, 77 | ) 78 | ) 79 | 80 | # internal synthesize 81 | def _render_event(self, event: NoteEvent) -> np.ndarray: 82 | if event.vibrato_freq and event.max_pitch_dev: 83 | note = synths.note_with_vibrato( 84 | freq=event.freq, 85 | duration=event.duration, 86 | vibrato_freq=event.vibrato_freq, 87 | max_pitch_dev=event.max_pitch_dev, 88 | sample_rate=self.sample_rate, 89 | ) 90 | else: 91 | note = synths.note( 92 | freq=event.freq, 93 | duration=event.duration, 94 | sample_rate=self.sample_rate, 95 | ) 96 | if event.adsr_params: 97 | note = adsr( 98 | sonic_vector=note, 99 | sample_rate=self.sample_rate, 100 | **event.adsr_params, 101 | ) 102 | if event.spatial: 103 | note = localize( 104 | sonic_vector=note, sample_rate=self.sample_rate, **event.spatial 105 | ) 106 | return note 107 | 108 | def render(self) -> np.ndarray: 109 | """Render all scheduled events and return the audio array.""" 110 | result: np.ndarray | None = None 111 | for event in sorted(self.events, key=lambda e: e.start): 112 | sound = self._render_event(event) 113 | if result is None: 114 | base = np.zeros((2, 0)) if sound.ndim == 2 else np.zeros(0) 115 | result = self._mix_with_offset(base, sound, event.start) 116 | else: 117 | result = self._mix_with_offset(result, sound, event.start) 118 | return np.array([]) if result is None else result 119 | 120 | def write(self, filename: str, bit_depth: int = 16) -> None: 121 | """Write the rendered audio to a WAV file.""" 122 | data = self.render() 123 | if data.ndim == 1: 124 | write_wav_mono(data, filename=filename, sample_rate=self.sample_rate, bit_depth=bit_depth) 125 | else: 126 | write_wav_stereo( 127 | data, filename=filename, sample_rate=self.sample_rate, bit_depth=bit_depth 128 | ) 129 | 130 | 131 | __all__ = ["Sequencer", "NoteEvent"] 132 | 133 | -------------------------------------------------------------------------------- /music/core/filters/fade.py: -------------------------------------------------------------------------------- 1 | """Amplitude fade filters for smooth transitions.""" 2 | 3 | import numpy as np 4 | from .loud import loud 5 | from ...utils import resolve_stereo, mix_with_offset 6 | 7 | 8 | def fade(duration=2, fade_out=True, method="exp", db=-80, alpha=1, perc=1, 9 | number_of_samples=0, sonic_vector=0, sample_rate=44100): 10 | """ 11 | A fade in or out. 12 | 13 | Implements the loudness transition and asserts that it reaches zero 14 | amplitude. 15 | 16 | Parameters 17 | ---------- 18 | duration : scalar 19 | The duration in seconds of the fade. 20 | fade_out : boolean 21 | If True, the fade is a fade out, else it is a fade in. 22 | method : string 23 | "exp" for an exponential transition of amplitude (linear loudness). 24 | "linear" for a linear transition of amplitude. 25 | db : scalar 26 | The decibels from which to reach before using the linear transition to 27 | reach zero. Not used if method="linear". 28 | alpha : scalar 29 | An index to make the exponential fade slower or faster [1]. Ignored if 30 | transitions="linear". 31 | perc : scalar 32 | The percentage of the fade that is linear to ensure it reaches zero. 33 | It has no effect if method="linear". 34 | number_of_samples : integer 35 | The number of samples of the fade. If supplied, d is ignored. 36 | sonic_vector : array_like 37 | Samples for the fade to be applied to. If supplied, d and nsamples are 38 | ignored. 39 | sample_rate : integer 40 | The sample rate. 
Only used if number_of_samples and sonic_vector are 41 | not supplied. 42 | 43 | Returns 44 | ------- 45 | ai : ndarray 46 | Each value is a value of the envelope for the PCM samples. If 47 | sonic_vector is input, ai is the sonic vector with the fade applied to 48 | it. 49 | 50 | See Also 51 | -------- 52 | adsr : An ADSR envelope. 53 | loud : A transition of loudness. 54 | louds : An envelope with an arbitrary number or loudness transitions. 55 | tremolo : An oscillation of loudness. 56 | 57 | Examples 58 | -------- 59 | >>> write_wav_mono(note_with_vibrato() * fade()) 60 | >>> s = horizontal_stack([note_with_vibrato() * fade(fade_out=i, method=j) 61 | ... for i, j in zip([1, 0, 1], 62 | ... ["exp", "exp", "linear"])]) 63 | >>> s = horizontal_stack([fade(fade_out=i, method=j, sonic_vector=V()) 64 | ... for i, j in zip([1, 0, 1], 65 | ... ["exp", "exp", "linear"])]) 66 | >>> envelope = fade(duration=10, fade_out=0, perc=0.1) 67 | 68 | Notes 69 | ----- 70 | Cite the following article whenever you use this function. 71 | 72 | References 73 | ---------- 74 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 75 | representation of sound." arXiv preprint arXiv:abs/1412.6853 (2017) 76 | 77 | """ 78 | if type(sonic_vector) in (np.ndarray, list): 79 | if len(sonic_vector.shape) == 2: 80 | return resolve_stereo(fade, locals()) 81 | n = len(sonic_vector) 82 | elif number_of_samples: 83 | n = number_of_samples 84 | else: 85 | n = int(sample_rate * duration) 86 | if 'lin' in method: 87 | if fade_out: 88 | ai = loud(method="linear", trans_dev=0, number_of_samples=n) 89 | else: 90 | ai = loud(method="linear", to=0, trans_dev=0, number_of_samples=n) 91 | if 'exp' in method: 92 | n0 = int(n*perc/100) 93 | n1 = n - n0 94 | if fade_out: 95 | ai1 = loud(trans_dev=db, alpha=alpha, number_of_samples=n1) 96 | if n0: 97 | ai0 = loud(method="linear", trans_dev=0, 98 | number_of_samples=n0) * ai1[-1] 99 | else: 100 | ai0 = [] 101 | ai = np.hstack((ai1, ai0)) 102 | else: 103 | ai1 = loud(trans_dev=db, to=0, alpha=alpha, number_of_samples=n1) 104 | if n0: 105 | ai0 = loud(method="linear", to=0, trans_dev=0, 106 | number_of_samples=n0) * ai1[0] 107 | else: 108 | ai0 = [] 109 | ai = np.hstack((ai0, ai1)) 110 | if type(sonic_vector) in (np.ndarray, list): 111 | return ai*sonic_vector 112 | else: 113 | return ai 114 | 115 | 116 | def cross_fade(sonic_vector_1, sonic_vector_2, duration=500, method='lin', 117 | sample_rate=44100): 118 | """ 119 | Cross fade in duration milisseconds. 
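The first sound fades out while the second fades in, and the two are mixed
over the overlapping region.

Parameters
----------
sonic_vector_1 : array_like
    The sound that fades out. Mono (nsamples,) or stereo (2, nsamples).
sonic_vector_2 : array_like
    The sound that fades in. Must have the same number of dimensions as
    sonic_vector_1.
duration : scalar
    The duration of the cross fade in milliseconds.
method : string
    The fade method passed to fade(), e.g. "lin" or "exp".
sample_rate : integer
    The sample rate.

Notes
-----
The faded regions of the input arrays are scaled in place.

Examples
--------
A minimal sketch, joining two illustrative constant signals with a 5 ms
overlap (the values mirror the test suite and are otherwise arbitrary):

>>> import numpy as np
>>> mixed = cross_fade(np.ones(441), np.ones(441) * 2, duration=5,
...                    sample_rate=44100)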
120 | 121 | """ 122 | ns = int(duration * sample_rate / 1000) 123 | if len(sonic_vector_1.shape) != len(sonic_vector_2.shape): 124 | raise ValueError('sonic_vector_1 and sonic_vector_2 must have the same shape') 125 | if len(sonic_vector_1.shape) == 2: 126 | s1_ = cross_fade(sonic_vector_1[0], sonic_vector_2[0], duration, 127 | method, sample_rate) 128 | s2_ = cross_fade(sonic_vector_1[1], sonic_vector_2[1], duration, 129 | method, sample_rate) 130 | s = np.array((s1_, s2_)) 131 | return s 132 | sonic_vector_1[-ns:] *= fade(number_of_samples=ns, method=method, 133 | sample_rate=sample_rate) 134 | sonic_vector_2[:ns] *= fade(number_of_samples=ns, method=method, 135 | sample_rate=sample_rate, fade_out=False) 136 | s = mix_with_offset(sonic_vector_1, sonic_vector_2, 137 | duration=-duration / 1000) 138 | return s 139 | -------------------------------------------------------------------------------- /music/core/synths/noises.py: -------------------------------------------------------------------------------- 1 | """Module for the synthesis of noises and silences.""" 2 | from numbers import Number 3 | import numpy as np 4 | import music 5 | 6 | 7 | def noise(noise_type="brown", duration=2, min_freq=15, max_freq=15000, 8 | number_of_samples=0, sample_rate=44100): 9 | """ 10 | Return a colored or user-refined noise. 11 | 12 | Parameters 13 | ---------- 14 | noise_type : string or scalar 15 | Specifies the decibels gain or attenuation per octave. It can be 16 | specified numerically (e.g. ntype=3.5 is 3.5 decibels gain per octave) 17 | or by strings: 18 | - "brown" is -6dB/octave 19 | - "pink" is -3dB/octave 20 | - "white" is 0dB/octave 21 | - "blue" is 3dB/octave 22 | - "violet" is 6dB/octave 23 | - "black" is -12/dB/octave but, in theory, is any < -6dB/octave 24 | See [1] for more information. 25 | duration : scalar 26 | The duration of the noise in seconds. 27 | min_freq : scalar in [0, fs/2] 28 | The lowest frequency allowed. 29 | max_freq : scalar in [0, fs/2] 30 | The highest frequency allowed. 31 | It should be > fmin. 32 | number_of_samples : integer 33 | The number of samples of the resulting sonic vector. 34 | sample_rate : integer 35 | The sample rate to use, by default 44100. 36 | 37 | Notes 38 | ----- 39 | The noise is synthesized with components with random phases, with the 40 | moduli that are related to the decibels/octave, and with a frequency 41 | resolution of fs / nsamples = fs / (fs*d) = 1/d Hz 42 | 43 | Cite the following article whenever you use this function. 44 | 45 | References 46 | ---------- 47 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 48 | representation of sound." arXiv preprint arXiv:abs/1412.6853 (2017) 49 | 50 | """ 51 | if number_of_samples: 52 | length = number_of_samples 53 | else: 54 | length = int(duration * sample_rate) 55 | if noise_type == "white": 56 | prog = 0 57 | elif noise_type == "pink": 58 | prog = -3 59 | elif noise_type == "brown": 60 | prog = -6 61 | elif noise_type == "blue": 62 | prog = 3 63 | elif noise_type == "violet": 64 | prog = 6 65 | elif noise_type == "black": 66 | prog = -12 67 | elif isinstance(noise_type, Number): 68 | prog = noise_type 69 | else: 70 | raise ValueError( 71 | "Set ntype to a number or one of the following strings: " 72 | "'white', 'pink', 'brown', 'blue', 'violet', 'black'. 
" 73 | "Check docstring for more information.") 74 | 75 | coeffs = np.zeros(length, dtype=complex) 76 | coeffs[:length // 2] = np.exp(1j * 77 | np.random.uniform(0, 2 * np.pi, length // 2)) 78 | if length % 2 == 0: 79 | coeffs[length // 2] = 1. 80 | 81 | freq_res = sample_rate / length 82 | first_coeff = int(np.floor(min_freq / freq_res)) 83 | first_coeff = max(1, first_coeff) 84 | last_coeff = int(np.floor(max_freq / freq_res)) 85 | coeffs[:first_coeff] = 0 86 | coeffs[last_coeff:] = 0 87 | 88 | factor = 10. ** (prog / 20.) 89 | freq_i = np.arange(coeffs.shape[0]) * freq_res 90 | denom = max(min_freq, freq_res) 91 | freqs = np.clip(freq_i[first_coeff:last_coeff], freq_res, None) 92 | attenuation_factors = factor ** (np.log2(freqs / denom)) 93 | coeffs[first_coeff:last_coeff] *= attenuation_factors 94 | 95 | if length % 2 == 0: 96 | high_freq_conj_coeffs = np.conj(coeffs[1:length // 2][::-1]) 97 | coeffs[length // 2 + 1:] = high_freq_conj_coeffs 98 | else: 99 | high_freq_conj_coeffs = np.conj(coeffs[1:length // 2][::-1]) 100 | coeffs[length // 2 + 1:-1] = high_freq_conj_coeffs 101 | 102 | noise_vector = np.fft.ifft(coeffs).real 103 | return music.core.normalize_mono(noise_vector) 104 | 105 | 106 | def gaussian_noise(mean=1, std=0.5, duration=2, sample_rate=44100): 107 | """Synth gaussian noise 108 | 109 | Parameters 110 | ---------- 111 | mean : int, optional 112 | _description_, by default 1 113 | std : float, optional 114 | _description_, by default 0.5 115 | duration : int, optional 116 | How long in seconds will the noise be, by default 2 117 | sample_rate : int, optional 118 | The sample rate to use, by default 44100 119 | 120 | Returns 121 | ------- 122 | array 123 | An array for the gaussian noise 124 | """ 125 | 126 | length = duration * sample_rate 127 | freq_res = sample_rate / float(length) 128 | coeffs = np.exp(1j * np.random.uniform(0, 2 * np.pi, length)) 129 | coeffs[length // 2 + 1:] = np.real(coeffs[1:length // 2])[::-1] - 1j * \ 130 | np.imag(coeffs[1:length // 2])[::-1] 131 | coeffs[0] = 0. # sem bias 132 | if length % 2 == 0: 133 | coeffs[length // 2] = 0. 134 | f1 = (mean - std / 2) * 3000 135 | f2 = (mean + std / 2) * 3000 136 | first_coeff = int(np.floor(f1 / freq_res)) 137 | last_coeff = int(np.floor(f2 / freq_res)) 138 | coeffs[:first_coeff] = np.zeros(first_coeff) 139 | coeffs[last_coeff:] = np.zeros(len(coeffs[last_coeff:])) 140 | 141 | # obtenção do ruído em suas amostras temporais 142 | noise_vector = np.real(np.fft.ifft(coeffs)) 143 | noise_vector = ((noise_vector - noise_vector.min()) / 144 | (noise_vector.max() - noise_vector.min())) * 2 - 1 145 | 146 | # fazer tre_freq variar conforme measures2 147 | return music.core.normalize_mono(noise_vector) 148 | 149 | 150 | def silence(duration=1.0, sample_rate=44100): 151 | """Generate a silence of specified length. 
152 | 153 | Parameters 154 | ---------- 155 | duration : int, optional 156 | How many seconds will silence last, by default 1 157 | sample_rate : int, optional 158 | The sample rate to use, by default 44100 159 | 160 | Returns 161 | ------- 162 | array 163 | An array with no sound 164 | """ 165 | 166 | return np.zeros(int(duration * sample_rate)) 167 | -------------------------------------------------------------------------------- /music/core/filters/adsr.py: -------------------------------------------------------------------------------- 1 | """Amplitude envelope filters including ADSR-related helpers.""" 2 | 3 | import numpy as np 4 | from .fade import fade 5 | from .loud import loud 6 | from ..synths.notes import note_with_vibrato 7 | 8 | 9 | def adsr(envelope_duration=2, attack_duration=20, 10 | decay_duration=20, sustain_level=-5, 11 | release_duration=50, transition="exp", alpha=1, 12 | db_dev=-80, to_zero=1, number_of_samples=0, sonic_vector=0, 13 | sample_rate=44100): 14 | """ 15 | Synthesize an ADSR envelope. 16 | 17 | ADSR (Atack, Decay, Sustain, Release) is a very traditional loudness 18 | envelope in sound synthesis [1]. 19 | 20 | Parameters 21 | ---------- 22 | envelope_duration : scalar 23 | The duration of the envelope in seconds. 24 | attack_duration : scalar 25 | The duration of the Attack in milliseconds. 26 | decay_duration : scalar 27 | The duration of the Decay in milliseconds. 28 | sustain_level : scalar 29 | The Sustain level after the Decay in decibels. 30 | Usually negative. 31 | release_duration : scalar 32 | The duration of the Release in milliseconds. 33 | transition : string 34 | "exp" for exponential transitions of amplitude 35 | (linear loudness). 36 | "linear" for linear transitions of amplitude. 37 | alpha : scalar or array_like 38 | An index to make the exponential fade slower or faster [1]. Ignored it 39 | transitions="linear" or alpha=1. If it is an array_like, it should 40 | hold three values to be used in Attack, Decay and Release. 41 | db_dev : scalar or array_like 42 | The decibels deviation to reach before using a linear fade to reach 43 | zero amplitude. If it is an array_like, it should hold two values, one 44 | for Attack and another for Release. Ignored if trans="linear". 45 | to_zero : scalar or array_like 46 | The duration in milliseconds for linearly departing from zero in the 47 | Attack and reaching the value of zero at the end of the Release. If it 48 | is an array_like, it should hold two values, one for Attack and 49 | another for Release. It's ignored if trans="linear". 50 | number_of_samples : integer 51 | The number of samples of the envelope. If supplied, d is ignored. 52 | sonic_vector : array_like 53 | Samples for the ADSR envelope to be applied to. If supplied, d and 54 | nsamples are ignored. 55 | sample_rate : integer 56 | The sample rate. 57 | 58 | Returns 59 | ------- 60 | as : ndarray 61 | A numpy array where each value is a value of the envelope for the PCM 62 | samples if sonic_vector is 0. If sonic_vector is input, ad is the 63 | sonic vector with the ADSR envelope applied to it. 64 | 65 | See Also 66 | -------- 67 | tremolo : An oscillation of loudness. 68 | loud : A loudness transition. 69 | fade : A fade in or fade out. 70 | 71 | Examples 72 | -------- 73 | >>> write_wav_mono(note_with_vibrato() * adsr()) 74 | >>> s = horizontal_stack([note_with_vibrato() * 75 | ... adsr(attack_duration=i, release_duration=j) 76 | ... 
for i, j in zip([6, 50, 300], [100, 10, 200])]) 77 | >>> s = horizontal_stack([adsr(A=i, R=j, sonic_vector=note_with_vibrato()) 78 | ... for i, j in zip([6, 15, 100], [2, 2, 20])]) 79 | >>> envelope = adsr(d=440, A=10e3, D=0, R=5e3) 80 | 81 | Notes 82 | ----- 83 | Cite the following article whenever you use this function. 84 | 85 | References 86 | ---------- 87 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 88 | representation of sound." arXiv preprint arXiv:abs/1412.6853 (2017) 89 | 90 | """ 91 | if type(sonic_vector) in (np.ndarray, list): 92 | lambda_adsr = len(sonic_vector) 93 | elif number_of_samples: 94 | lambda_adsr = number_of_samples 95 | else: 96 | lambda_adsr = int(envelope_duration * sample_rate) 97 | lambda_a = int(attack_duration * sample_rate * 0.001) 98 | lambda_d = int(decay_duration * sample_rate * 0.001) 99 | lambda_r = int(release_duration * sample_rate * 0.001) 100 | 101 | perc = to_zero / attack_duration 102 | attack_duration = fade(fade_out=0, method=transition, alpha=alpha, 103 | db=db_dev, perc=perc, number_of_samples=lambda_a) 104 | 105 | decay_duration = loud(trans_dev=sustain_level, method=transition, 106 | alpha=alpha, number_of_samples=lambda_d) 107 | 108 | a_s = 10 ** (sustain_level / 20.) 109 | sustain_level = np.ones(lambda_adsr - 110 | (lambda_a + lambda_r + lambda_d)) * a_s 111 | 112 | perc = to_zero / release_duration 113 | release_duration = fade(method=transition, alpha=alpha, db=db_dev, 114 | perc=perc, number_of_samples=lambda_r) * a_s 115 | 116 | ad = np.hstack((attack_duration, decay_duration, sustain_level, 117 | release_duration)) 118 | if type(sonic_vector) in (np.ndarray, list): 119 | return sonic_vector * ad 120 | else: 121 | return ad 122 | 123 | 124 | def adsr_vibrato(note_dict={}, adsr_dict={}): 125 | """ 126 | Creates a note with a vibrato and an ADSR envelope. 127 | 128 | Check the adsr and the note_with_vibrato functions. 129 | 130 | """ 131 | return adsr(sonic_vector=note_with_vibrato(**note_dict), **adsr_dict) 132 | 133 | 134 | def adsr_stereo(duration=2, attack_duration=20, decay_duration=20, 135 | sustain_level=-5, release_duration=50, transition="exp", 136 | alpha=1, db_dev=-80, to_zero=1, number_of_samples=0, 137 | sonic_vector=0, sample_rate=44100): 138 | """ 139 | A shorthand to make an ADSR envelope for a stereo sound. 140 | 141 | See adsr() for more information. 
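Examples
--------
An illustrative sketch, applying the stereo envelope to a two-channel
signal; note_with_vibrato() is used here only as a convenient test sound:

>>> import numpy as np
>>> stereo_note = np.vstack((note_with_vibrato(), note_with_vibrato()))
>>> enveloped = adsr_stereo(sonic_vector=stereo_note)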
142 | 143 | """ 144 | if type(sonic_vector) in (np.ndarray, list): 145 | sonic_vector1 = sonic_vector[0] 146 | sonic_vector2 = sonic_vector[1] 147 | else: 148 | sonic_vector1 = 0 149 | sonic_vector2 = 0 150 | s1 = adsr(envelope_duration=duration, attack_duration=attack_duration, 151 | decay_duration=decay_duration, sustain_level=sustain_level, 152 | release_duration=release_duration, transition=transition, 153 | alpha=alpha, db_dev=db_dev, to_zero=to_zero, 154 | number_of_samples=number_of_samples, sonic_vector=sonic_vector1, 155 | sample_rate=sample_rate) 156 | s2 = adsr(envelope_duration=duration, attack_duration=attack_duration, 157 | decay_duration=decay_duration, sustain_level=sustain_level, 158 | release_duration=release_duration, transition=transition, 159 | alpha=alpha, db_dev=db_dev, to_zero=to_zero, 160 | number_of_samples=number_of_samples, sonic_vector=sonic_vector2, 161 | sample_rate=sample_rate) 162 | s = np.vstack((s1, s2)) 163 | return s 164 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Music 2 | 3 | Music is a python package to generate and manipulate music and sounds. It's written using the [MASS (Music and Audio in Sample Sequences)](https://github.com/ttm/mass/) framework, a collection of psychophysical descriptions of musical elements in LPCM audio through equations and corresponding Python routines. 4 | 5 | To have a further understanding of the routines you can read the article 6 | [Musical elements in the discrete-time representation of sound](https://github.com/ttm/mass/raw/master/doc/article.pdf). 7 | 8 | If you use this package, please cite the forementioned article. 9 | 10 | ## Core features 11 | 12 | The precision of Music makes it the perfect choice for many scientific uses. At its core there are a few important features: 13 | 14 | * **Sample-based synth**, meaning that the state is updated at each sample. For example, when we have a note with a vibrato, each sample is associated to a different frequency. By doing this the synthesized sound is the closest it can be to the mathematical model that describes it. 15 | * **Musical structures** with emphasis in symmetry and discourse. 16 | 17 | * **play_audio** utility to listen to generated sounds without saving files. 18 | Music can be used alone or with other packages, and it's ideal for audiovisualization of data. For example, it can be used with [Percolation](https://github.com/ttm/percolation) and [Participation](https://github.com/ttm/participation) for harnessing open linked social data, or with [audiovisual analytics vocabulary and ontology (AAVO)](https://github.com/ttm/aavo). 19 | 20 | ## How to install 21 | 22 | To install music you can either install it directly with `pip`: 23 | 24 | ```console 25 | pip3 install music 26 | ``` 27 | 28 | or you can clone this repository and install it from there: 29 | 30 | ```console 31 | git clone https://github.com/ttm/music.git 32 | pip3 install -e 33 | ``` 34 | 35 | This install method is especially useful when reloading the modified module in subsequent runs of music, and for greater control of customization, hacking and debugging. 36 | 37 | ### Dependencies 38 | 39 | Every dependency is installed by default by `pip`, but you can take a look at [requirements.txt](https://github.com/ttm/music/blob/master/requirements.txt). 
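
### Quick check

To verify that the installation works, you can synthesize a short note and write it to a WAV file. This is only a minimal sketch; the frequency, duration and filename below are arbitrary:

```python
import music

# Render a two-second 440 Hz note and save it next to the script.
tone = music.note(freq=440, duration=2)
music.write_wav_mono(tone, filename="a440.wav")
```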
40 | 41 | ### Testing 42 | 43 | The packages required to run the test suite are available via the `dev` 44 | extras defined in `pyproject.toml`. Install them with: 45 | 46 | ```console 47 | pip install -e '.[dev]' 48 | ``` 49 | 50 | You can then run the tests using `pytest`: 51 | 52 | ```console 53 | pytest 54 | ``` 55 | 56 | ### Type checking 57 | 58 | Install the development dependencies and run `mypy` to perform static type 59 | analysis: 60 | 61 | ```console 62 | pip install -e '.[dev]' 63 | mypy music 64 | ``` 65 | 66 | ## Examples 67 | 68 | Inside [the examples folder](https://github.com/ttm/music/tree/master/examples) you can find some scripts that use the main features of Music. 69 | 70 | * [chromatic_scale](https://github.com/ttm/music/tree/master/examples/chromatic_scale.py): writes twelve notes into a WAV file from a sequence of frequencies. 71 | * [penta_effects](https://github.com/ttm/music/tree/master/examples/chromatic_scale.py): writes a pentatonic scale repeated once clean, once with pitch, one with vibrato, one with Doppler, and one with FM, into a WAV stereo file. 72 | * [noisy](https://github.com/ttm/music/tree/master/examples/noisy.py): writes into a WAV file a sequence of different noises. 73 | * [thirty_notes](https://github.com/ttm/music/tree/master/examples/thirty_notes.py) and [thirty_numpy_notes](https://github.com/ttm/music/tree/master/examples/thirty_numpy_notes.py) generate a sequence of sounds by using a synth class (in this case the class [`Being`](https://github.com/ttm/music/tree/master/music/legacy/classes.py)). 74 | * [campanology](https://github.com/ttm/music/tree/master/examples/campanology.py) and [geometric_music](https://github.com/ttm/music/tree/master/examples/geometric_music.py) both use `Being` as their synth, but this time with permutations. 75 | * [isynth](https://github.com/ttm/music/tree/master/examples/isynth.py) also uses a synth class, but of a different kind, [`IteratorSynth`](https://github.com/ttm/music/tree/master/music/legacy/classes.py), that iterates through arbitrary lists of variables. 76 | * [singing_demo](https://github.com/ttm/music/tree/master/examples/singing_demo.py): demonstrates `music.singing.setup_engine()` and `music.singing.make_test_song()` to render a short sung phrase. 77 | * [binaural_beats](https://github.com/ttm/music/tree/master/examples/binaural_beats.py): generates binaural beats using two pure tones with tremolo for relaxation or focus. 78 | * The `music.singing` module provides basic text-to-speech utilities. Run `music.singing.setup_engine()` once to clone the eCantorix engine before using these features. 79 | 80 | ## Package structure 81 | 82 | The modules are: 83 | 84 | * **core**: 85 | * **synths** for synthesization of notes (including vibratos, glissandos, etc.), noises and envelopes. 86 | * **filters** for the application of filters such as ADSR envelopes, fades, IIR and FIR, reverb, loudness, and localization. 87 | * **io** for reading, writing and playing audio, both mono and stereo. 88 | * **functions** for normalization. 89 | * **structures** for higher level musical structures such as permutations (and related to algebraic groups and change ringing peals), scales, chords, counterpoint, tunings, etc. 90 | * **legacy** for musical pieces that are rendered with the Music package and might be used as material to make more music. 91 | * **tables** for the generation of lookup tables for some basic waveform. 92 | * **utils** for various functions regarding conversions, mix, etc. 
93 | * **sequencer** for scheduling notes into a timeline and exporting audio. 94 | 95 | ## Roadmap 96 | 97 | Music is stable but still very young. We haven't yet had the opportunity to make Music all we want it to be. 98 | 99 | Here is one example of what we're aiming at: 100 | 101 | ```python 102 | import music 103 | 104 | music.render_demos() # render some wav files in ./ 105 | 106 | music.legacy.experiments.cristal2(.2, 300) # wav of sonic structure in ./ 107 | 108 | sound_waves = music.legacy.songs.madame_z(render=False) # return numpy array 109 | 110 | sound_waves2 = music.core.io.open("demosong2.wav") # numpy array 111 | 112 | remixed = music.remix(sound_waves, sound_waves2) 113 | music_ = music.horizontal_stack(sound_waves[:44100*2], remixed[len(remixed)//2::2]) 114 | 115 | music.core.io.write_wav_mono(music_) 116 | 117 | ``` 118 | 119 | ## Coding conventions 120 | 121 | The code follows [PEP 8 conventions](https://peps.python.org/pep-0008/). 122 | 123 | For a better understanding of each function, the math behind it, and examples of its use, you can read its docstring. 124 | 125 | ## Further information 126 | 127 | Music is primarily intended for artistic use, but it was also designed for running psychophysics experiments and for data sonification. 128 | 129 | You can find an example in [Versinus](https://github.com/ttm/versinus), an animated visualization method for evolving networks that uses Music to render the musical track that represents network structures. 130 | 131 | 132 | -------------------------------------------------------------------------------- /music/structures/peals/plain_changes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Present plain changes as swaps and act in domains to make peals. 3 | 4 | Reference: 5 | - http://www.gutenberg.org/files/18567/18567-h/18567-h.htm 6 | """ 7 | 8 | import sympy 9 | import warnings 10 | 11 | 12 | class PlainChanges: 13 | """ 14 | Presents plain changes as swaps and acts in domains to make peals. 15 | """ 16 | 17 | def __init__(self, nelements=4, nhunts=None, hunts=None): 18 | """ 19 | Initializes a PlainChanges object. 20 | 21 | Parameters: 22 | nelements (int, optional): The number of elements. Defaults to 4. 23 | nhunts (int, optional): The number of hunts. Defaults to None. 24 | hunts (dict, optional): The hunts dictionary. Defaults to None. 25 | 26 | Raises: 27 | ValueError: If the number of hunts is invalid. 28 | """ 29 | self.peal_direct = None 30 | self.peal_sequence = None 31 | self.domain = None 32 | self.acted_peals = None 33 | self.peals = None 34 | hunts = self.initialize_hunts(nelements, nhunts) 35 | self.neutral_perm = sympy.combinatorics.Permutation([0], 36 | size=nelements) 37 | self.neighbor_swaps = [ 38 | sympy.combinatorics.Permutation(i, i + 1, size=nelements) 39 | for i in range(nelements - 1)] 40 | self.domains = [] 41 | self.perform_peal(nelements, dict(hunts)) 42 | self.hunts = hunts 43 | self.nelements = nelements 44 | 45 | def initialize_hunts(self, nelements=4, nhunts=None): 46 | """ 47 | Initializes the hunts dictionary. 48 | 49 | Parameters: 50 | nelements (int, optional): The number of elements. Defaults to 4. 51 | nhunts (int, optional): The number of hunts. Defaults to None. 52 | 53 | Returns: 54 | dict: The hunts dictionary. 55 | 56 | Raises: 57 | ValueError: If the number of hunts is invalid.
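Example:
    An illustrative call with four bells and a single hunt (the numbers
    are arbitrary small values):

    >>> changes = PlainChanges(4)
    >>> sorted(changes.initialize_hunts(4, 1))
    ['hunt0']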
58 | """ 59 | if not nhunts: 60 | if nelements > 4: 61 | nhunts = 2 62 | else: 63 | nhunts = 1 64 | assert nelements > 0 65 | if nhunts > nelements: 66 | raise ValueError("There cannot be more hunts than elements") 67 | elif nhunts > nelements - 3: 68 | warnings.warn( 69 | f"peals are the same if there are {nhunts - (nelements - 3)} " 70 | "hunts less") 71 | hunts_dict = {} 72 | for hunt in range(nhunts): 73 | if hunt == nhunts - 1: 74 | next_ = None 75 | else: 76 | next_ = "hunt" + str(hunt + 1) 77 | hunts_dict["hunt" + str(hunt)] = dict(level=hunt, position=hunt, 78 | status="started", 79 | direction="up", next_=next_) 80 | return hunts_dict 81 | 82 | def perform_peal(self, nelements, hunts=None): 83 | """ 84 | Performs a peal. 85 | 86 | Parameters: 87 | nelements (int): The number of elements. 88 | hunts (dict, optional): The hunts dictionary. Defaults to None. 89 | 90 | Returns: 91 | dict: The updated hunts dictionary. 92 | """ 93 | if hunts is None: 94 | hunts = self.initialize_hunts(nelements) 95 | permutation, hunts = self.perform_change(nelements, hunts) 96 | total_perm = permutation 97 | peal_direct = [self.neutral_perm] 98 | peal_sequence = [permutation] 99 | while total_perm != self.neutral_perm: 100 | peal_direct += [total_perm] 101 | permutation, hunts = self.perform_change(nelements, hunts) 102 | total_perm = permutation * total_perm 103 | peal_sequence += [permutation] 104 | self.peal_direct = peal_direct 105 | self.peal_sequence = peal_sequence 106 | return hunts 107 | 108 | def perform_change(self, nelements, hunts, hunt=None): 109 | """ 110 | Performs a change procedure. 111 | 112 | Parameters: 113 | nelements (int): The number of elements. 114 | hunts (dict): The hunts dictionary. 115 | hunt (str, optional): The current hunt. Defaults to None. 116 | 117 | Returns: 118 | Permutation: The permutation of the change. 119 | dict: The updated hunts dictionary. 120 | """ 121 | if hunt is None: 122 | hunt = "hunt0" 123 | hunt_ = hunts[hunt] 124 | direction = hunt_["direction"] 125 | assert direction in {"up", "down"} 126 | position = hunt_["position"] 127 | swap_with = (position - 1, position + 1)[direction == "up"] 128 | cut_bellow = sum([hunts["hunt" + str(i)]["direction"] == "up" 129 | for i in range(hunt_["level"])]) 130 | cut_above = nelements - (hunt_["level"] - cut_bellow) 131 | domain = list(range(nelements))[cut_bellow:cut_above] 132 | self.domains += [(domain, cut_bellow, cut_above, hunt_["level"], hunt, 133 | position, swap_with)] 134 | if swap_with in domain: 135 | swap = self.neighbor_swaps[(position - 1, position) 136 | [direction == "up"]] 137 | for ahunt in hunts: 138 | if hunts[ahunt]["position"] == swap_with: 139 | hunts[ahunt]["position"] = position 140 | hunts[hunt]["position"] = swap_with 141 | else: 142 | new_direction = ("up", "down")[direction == "up"] 143 | hunts[hunt]["direction"] = new_direction 144 | self.domains += ["invert", new_direction, hunt] 145 | if hunt_["next_"] is None: 146 | swap = self.neighbor_swaps[(domain[0], domain[-2]) 147 | [new_direction == "up"]] 148 | else: 149 | subsequent_hunt = hunt_["next_"] 150 | swap, hunts = self.perform_change(nelements, hunts, 151 | subsequent_hunt) 152 | self.domains += [swap] 153 | return swap, hunts 154 | 155 | def act(self, domain=None, peal=None): 156 | """ 157 | Acts in a domain using a peal. 158 | 159 | Parameters: 160 | domain (list, optional): The domain. Defaults to None. 161 | peal (list, optional): The peal. Defaults to None. 162 | 163 | Returns: 164 | list: The acted domain. 
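Example:
    An illustrative sketch, acting the default peal on a domain of note
    names (any list-like domain of the right size works):

    >>> changes = PlainChanges(4)
    >>> rows = changes.act(["C", "D", "E", "F"])

    Each element of rows is the domain reordered by one row of the peal,
    starting from the untouched ordering.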
165 | """ 166 | if domain is None: 167 | domain = list(range(self.nelements)) 168 | if peal is None: 169 | peal = self.peal_direct 170 | return [i(domain) for i in peal] 171 | 172 | def act_all(self, domain=None): 173 | """ 174 | Acts in all peals using a domain. 175 | 176 | Parameters: 177 | domain (list, optional): The domain. Defaults to None. 178 | """ 179 | if domain is None: 180 | domain = list(range(self.nelements)) 181 | acted_peals = {} 182 | for peal in self.peals: 183 | acted_peals[peal + "_acted"] = [i(domain) 184 | for i in self.peals[peal]] 185 | self.domain = domain 186 | self.acted_peals = acted_peals 187 | -------------------------------------------------------------------------------- /music/core/io.py: -------------------------------------------------------------------------------- 1 | """Utilities for reading and writing WAV files.""" 2 | 3 | import logging 4 | from typing import Sequence 5 | 6 | import numpy as np 7 | from numpy.typing import ArrayLike, NDArray 8 | from scipy.io import wavfile 9 | from .functions import normalize_mono, normalize_stereo 10 | from .filters import adsr, adsr_stereo 11 | 12 | SONIC_VECTOR_MONO = np.random.uniform(size=100000) 13 | SONIC_VECTOR_STEREO = np.vstack((np.random.uniform(size=100000), 14 | np.random.uniform(size=100000))) 15 | 16 | 17 | def read_wav(filename: str) -> NDArray[np.float64]: 18 | """Reads a WAV file and return an array of its values. 19 | 20 | Parameters 21 | ---------- 22 | filename : string 23 | File name 24 | 25 | Returns 26 | ------- 27 | NDArray 28 | Values of the WAV file 29 | """ 30 | sample_rate, data = wavfile.read(filename) 31 | logging.debug("read_wav dtype %s", data.dtype) 32 | 33 | if np.issubdtype(data.dtype, np.integer): 34 | bits = np.iinfo(data.dtype).bits 35 | if bits not in (8, 16, 32): 36 | raise ValueError( 37 | f"unsupported integer WAV bit depth: {bits}" 38 | ) 39 | norm = float(2 ** (bits - 1)) 40 | elif np.issubdtype(data.dtype, np.floating): 41 | norm = float(np.max(np.abs(data))) 42 | if norm == 0: 43 | norm = 1.0 44 | else: 45 | raise ValueError(f"unsupported WAV data type: {data.dtype}") 46 | 47 | if data.ndim == 2: 48 | return data.astype(np.float64).T / norm 49 | return data.astype(np.float64) / norm 50 | 51 | 52 | def write_wav_mono( 53 | sonic_vector: ArrayLike = SONIC_VECTOR_MONO, 54 | filename: str = "asound.wav", 55 | sample_rate: int = 44100, 56 | fades: Sequence[int] | int = 0, 57 | bit_depth: int = 16, 58 | remove_bias: bool = True, 59 | ) -> None: 60 | """Writes a mono WAV file for a numpy array. 61 | 62 | One can also use, for example: 63 | import sounddevice as S 64 | S.play(__n(array)) 65 | 66 | Parameters 67 | ---------- 68 | sonic_vector : array_like 69 | The PCM samples to be written as a WAV sound file. The samples are 70 | always normalized by normalize_mono(sonic_vector) to have samples 71 | between -1 and 1. 72 | filename : string 73 | The filename to use for the file to be written. 74 | sample_rate : scalar 75 | The sample frequency. 76 | fades : interable 77 | An iterable with two values for the milliseconds you want for the fade 78 | in and out (to avoid clicks). 79 | bit_depth : integer 80 | The number of bits in each sample of the final file. 81 | remove_bias : boolean 82 | Whether to remove or not the bias (or offset) 83 | 84 | See Also 85 | -------- 86 | normalize_mono : Normalizes an array to [-1,1] 87 | write_wav_mono : Writes an array with the same arguments and order of them 88 | as scipy.io.wavfile. 89 | write_wav_stereo : Write a stereo file. 
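Examples
--------
An illustrative sketch, writing one second of a 440 Hz sine to disk with
10 ms fades; the filename and values are arbitrary:

>>> import numpy as np
>>> tone = np.sin(2 * np.pi * 440 * np.arange(44100) / 44100)
>>> write_wav_mono(tone, filename="a440.wav", fades=(10, 10))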
90 | 91 | """ 92 | result = normalize_mono(sonic_vector, remove_bias) * \ 93 | (2 ** (bit_depth - 1) - 1) 94 | if fades: 95 | f0, f1 = (fades[0], fades[1]) if isinstance(fades, Sequence) else (0, 0) 96 | result = adsr(attack_duration=f0, sustain_level=0, 97 | release_duration=f1, sonic_vector=result) 98 | if bit_depth not in (8, 16, 32, 64): 99 | raise ValueError( 100 | "bit_depth values allowed are only 8, 16, 32 and 64" 101 | ) 102 | nn = eval("np.int" + str(bit_depth)) 103 | result = nn(result) 104 | wavfile.write(filename, sample_rate, result) 105 | 106 | 107 | def write_wav_stereo( 108 | sonic_vector: ArrayLike = SONIC_VECTOR_STEREO, 109 | filename: str = "asound.wav", 110 | sample_rate: int = 44100, 111 | fades: Sequence[int] | int = 0, 112 | bit_depth: int = 16, 113 | remove_bias: bool = True, 114 | normalize_separately: bool = False, 115 | ) -> None: 116 | """Write a stereo WAV files for a numpy array. 117 | 118 | Parameters 119 | ---------- 120 | sonic_vector : array_like 121 | The PCM samples to be written as a WAV sound file. The samples are 122 | always normalized by normalize_stereo(sonic_vector) to have samples 123 | between -1 and 1 and remove the offset. 124 | Use array of shape (nchannels, nsamples). 125 | filename : string 126 | The filename to use for the file to be written. 127 | sample_rate : scalar 128 | The sample frequency. 129 | fades : interable 130 | An iterable with two values for the milliseconds you want for the fade 131 | in and out (to avoid clicks). 132 | bit_depth : integer 133 | The number of bits in each sample of the final file. 134 | remove_bias : boolean 135 | Whether to remove or not the bias (or offset) 136 | normalize_separately : boolean 137 | Set to True if each channel should be normalized separately. 138 | If False (default), the arrays will be rescaled in the same proportion. 139 | 140 | See Also 141 | -------- 142 | normalize_stereo : Normalizes a stereo array to [-1,1] 143 | write_wav_mono : Write a mono file. 144 | 145 | """ 146 | result = normalize_stereo(sonic_vector, remove_bias, 147 | normalize_separately) * (2 ** 148 | (bit_depth - 1) - 1) 149 | if fades: 150 | f0, f1 = (fades[0], fades[1]) if isinstance(fades, Sequence) else (0, 0) 151 | result = adsr_stereo(attack_duration=f0, sustain_level=0, 152 | release_duration=f1, sonic_vector=result) 153 | if bit_depth not in (8, 16, 32, 64): 154 | raise ValueError( 155 | "bit_depth values allowed are only 8, 16, 32 and 64" 156 | ) 157 | nn = eval("np.int" + str(bit_depth)) 158 | result = nn(result) 159 | wavfile.write(filename, sample_rate, result.T) 160 | 161 | 162 | def play_audio( 163 | sonic_vector: ArrayLike, 164 | sample_rate: int = 44100, 165 | normalize: bool = True, 166 | ) -> None: 167 | """Play a sonic vector using the :mod:`sounddevice` library. 168 | 169 | Parameters 170 | ---------- 171 | sonic_vector : array_like 172 | Samples to be played. Mono arrays should have shape ``(n,)`` and 173 | stereo arrays ``(2, n)``. 174 | sample_rate : int, optional 175 | Playback sample rate. Defaults to ``44100``. 176 | normalize : bool, optional 177 | If ``True`` (default), normalize ``sonic_vector`` before playback using 178 | :func:`normalize_mono` or :func:`normalize_stereo`. 179 | 180 | Notes 181 | ----- 182 | If the ``sounddevice`` module is not installed, this function logs a 183 | warning and returns without playing anything. 
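Examples
--------
An illustrative sketch, previewing one second of white noise without
writing a file; noise() is the package's own noise synth:

>>> from music import noise
>>> play_audio(noise(noise_type="white", duration=1), sample_rate=44100)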
184 | """ 185 | 186 | try: 187 | import sounddevice as sd # type: ignore 188 | except Exception: # pragma: no cover - fallback when sounddevice missing 189 | logging.warning("sounddevice module not available; cannot play audio") 190 | return 191 | 192 | data = np.array(sonic_vector, dtype=np.float64) 193 | 194 | if normalize: 195 | if data.ndim == 1: 196 | data = normalize_mono(data) 197 | else: 198 | data = normalize_stereo(data) 199 | 200 | if data.ndim == 2: 201 | data = data.T 202 | 203 | sd.play(data, samplerate=sample_rate) 204 | sd.wait() 205 | -------------------------------------------------------------------------------- /music/core/filters/loud.py: -------------------------------------------------------------------------------- 1 | """Loudness envelope filters and helpers.""" 2 | 3 | import numpy as np 4 | 5 | 6 | def loud(duration=2, trans_dev=10, alpha=1, to=True, method="exp", 7 | number_of_samples=0, sonic_vector=0, sample_rate=44100): 8 | """ 9 | An envelope for a linear or exponential transition of amplitude. 10 | 11 | An exponential transition of loudness yields a linear transition of 12 | loudness (theoretically). 13 | 14 | Parameters 15 | ---------- 16 | duration : scalar 17 | The duration of the envelope in seconds. 18 | trans_dev : scalar 19 | The deviation of the transition. If method="exp", the deviation is in 20 | decibels. If method="linear", the deviation is an amplitude proportion. 21 | alpha : scalar 22 | An index to make the transition slower or faster [1]. 23 | Ignored if method="linear". 24 | to : boolean 25 | If True, the transition ends at the deviation. 26 | If False, the transition starts at the deviation. 27 | method : string 28 | "exp" for an exponential transitions of amplitude (linear loudness). 29 | "linear" for a linear transition of amplitude. 30 | number_of_samples : integer 31 | The number of samples of the envelope. 32 | If supplied, d is ignored. 33 | sonic_vector : array_like 34 | Samples for the envelope to be applied to. 35 | If supplied, d and nsamples are ignored. 36 | sample_rate : integer 37 | The sample rate. 38 | Only used if nsamples and sonic_vector are not supplied. 39 | 40 | Returns 41 | ------- 42 | e : ndarray 43 | A numpy array where each value is a value of the envelope for the PCM 44 | samples. If sonic_vector is supplied, e is the sonic vector with the 45 | envelope applied to it. 46 | 47 | See Also 48 | -------- 49 | louds : An envelope with an arbitrary number of transitions. 50 | fade : Fade in and out. 51 | adsr : An ADSR envelope. 52 | tremolo : An oscillation of loudness. 53 | 54 | Examples 55 | -------- 56 | >>> write_wav_mono(note_with_vibrato() * loud()) 57 | >>> s = horizontal_stack([note_with_vibrato() * 58 | ... loud(trans_dev=i, method=j) 59 | ... for i, j in zip([6, -50, 2.3], 60 | ... ["exp", "exp", "linear"])]) 61 | >>> s = horizontal_stack([ 62 | ... loud(trans_dev=i, method=j, sonic_vector=note_with_vibrato()) 63 | ... for i, j in zip([6, -50, 2.3], ["exp", "exp", "linear"])]) 64 | >>> envelope = loud(duration=10, trans_dev=-80, to=False, alpha=2) 65 | 66 | Notes 67 | ----- 68 | Cite the following article whenever you use this function. 69 | 70 | References 71 | ---------- 72 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 73 | representation of sound." 
arXiv preprint arXiv:abs/1412.6853 (2017) 74 | 75 | """ 76 | if type(sonic_vector) in (np.ndarray, list): 77 | n = len(sonic_vector) 78 | elif number_of_samples: 79 | n = number_of_samples 80 | else: 81 | n = int(sample_rate * duration) 82 | samples = np.arange(n) 83 | n_ = n - 1 84 | if 'lin' in method: 85 | if to: 86 | a0 = 1 87 | al = trans_dev 88 | else: 89 | a0 = trans_dev 90 | al = 1 91 | e = a0 + (al - a0) * samples / n_ 92 | if 'exp' in method: 93 | if to: 94 | if alpha != 1: 95 | samples_ = (samples / n_) ** alpha 96 | else: 97 | samples_ = (samples / n_) 98 | else: 99 | if alpha != 1: 100 | samples_ = ((n_ - samples) / n_) ** alpha 101 | else: 102 | samples_ = ((n_ - samples) / n_) 103 | e = 10 ** (samples_ * trans_dev / 20) 104 | if type(sonic_vector) in (np.ndarray, list): 105 | return e * sonic_vector 106 | else: 107 | return e 108 | 109 | 110 | def louds(durations=(2, 4, 2), trans_devs=(5, -10, 20), alpha=(1, .5, 20), 111 | method=("exp", "exp", "exp"), number_of_samples=0, sonic_vector=0, 112 | sample_rate=44100): 113 | """ 114 | An envelope with linear or exponential transitions of amplitude. 115 | 116 | See L() for more details. 117 | 118 | Parameters 119 | ---------- 120 | durations : iterable 121 | The durations of the transitions in seconds. 122 | trans_devs : iterable 123 | The sequence of deviations of the transitions. 124 | If method="exp" the deviations are in decibels. 125 | If method="linear" the deviations are amplitude proportions. 126 | alpha : iterable 127 | Indexes to make the transitions slower or faster [1]. 128 | Ignored it method="linear". 129 | method : iterable 130 | Methods for each transition. 131 | "exp" for exponential transitions of amplitude (linear loudness). 132 | "linear" for linear transitions of amplitude. 133 | number_of_samples : interable 134 | The number of samples of each transition. 135 | If supplied, durations is ignored. 136 | sonic_vector : array_like 137 | Samples for the envelope to be applied to. 138 | If supplied, durations or number_of_samples is given, the final sound 139 | has the greatest duration of sonic_vector and durations 140 | (or number_of_samples) and missing samples are replaced with silence 141 | (if sonic_vector is shorter) or with a constant value (if durations or 142 | number_of_samples yield shorter sequences). 143 | sample_rate : integer 144 | The sample rate. 145 | Only used if nsamples and sonic_vector are not supplied. 146 | 147 | Returns 148 | ------- 149 | e : ndarray 150 | A numpy array where each value is a value of the envelope for the PCM 151 | samples. If sonic_vector is supplied, e is the sonic vector with the 152 | envelope applied to it. 153 | 154 | See Also 155 | -------- 156 | loud : An envelope for a loudness transition. 157 | fade : Fade in and out. 158 | adsr : An ADSR envelope. 159 | tremolo : An oscillation of loudness. 160 | 161 | Examples 162 | -------- 163 | >>> write_wav_mono(note_with_vibrato(duraton=8) * louds()) 164 | 165 | Notes 166 | ----- 167 | Cite the following article whenever you use this function. 168 | 169 | References 170 | ---------- 171 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 172 | representation of sound." 
arXiv preprint arXiv:abs/1412.6853 (2017) 173 | 174 | """ 175 | if type(sonic_vector) in (np.ndarray, list): 176 | n = len(sonic_vector) 177 | elif number_of_samples: 178 | n = sum(number_of_samples) 179 | else: 180 | n = int(sample_rate * sum(durations)) 181 | s = [] 182 | fact = 1 183 | if number_of_samples: 184 | for i, ns in enumerate(number_of_samples): 185 | s_ = loud(trans_devs[i], alpha[i], number_of_samples=ns, 186 | method=method[i]) * fact 187 | s.append(s_) 188 | fact = s_[-1] 189 | else: 190 | for i, dur in enumerate(durations): 191 | s_ = loud(dur, trans_devs[i], alpha[i], 192 | method=method[i], sample_rate=sample_rate) * fact 193 | s.append(s_) 194 | fact = s_[-1] 195 | e = np.hstack(s) 196 | 197 | if type(sonic_vector) in (np.ndarray, list): 198 | if len(e) < len(sonic_vector): 199 | s = np.hstack((e, np.ones(len(sonic_vector) - len(e)) * e[-1])) 200 | if len(e) > len(sonic_vector): 201 | sonic_vector = np.hstack((sonic_vector, np.ones( 202 | len(e) - len(sonic_vector)) * e[-1])) 203 | return sonic_vector * e 204 | else: 205 | return e 206 | -------------------------------------------------------------------------------- /music/legacy/classes.py: -------------------------------------------------------------------------------- 1 | """Utility classes supporting the legacy synthesizer API.""" 2 | 3 | import numpy as n 4 | import logging 5 | 6 | from music.legacy import tables 7 | from music.utils import horizontal_stack 8 | from music.core.io import write_wav_mono 9 | from music.core.filters.adsr import adsr 10 | from music.core.synths.notes import note_with_vibrato 11 | from music.utils import WAVEFORM_TRIANGULAR, WAVEFORM_SINE 12 | 13 | T = tables.Basic() 14 | n_ = n 15 | 16 | 17 | def V_(st=0, freq=220, duration=2., vibrato_freq=2., max_pitch_dev=2., 18 | waveform_table=WAVEFORM_TRIANGULAR, 19 | vibrato_waveform_table=WAVEFORM_SINE): 20 | """A shorthand for generating a note with vibrato. 21 | 22 | Args: 23 | st (float): Semitones relative to the base frequency. 24 | freq (float): Base frequency of the note. 25 | duration (float): Duration of the note in seconds. 26 | vibrato_freq (float): Frequency of the vibrato. 27 | max_pitch_dev (float): Maximum pitch deviation. 28 | waveform_table (array): Table representing the waveform of the note. 29 | vibrato_waveform_table (array): Table representing the waveform of the 30 | vibrato. 31 | 32 | Returns: 33 | array: A note with vibrato. 34 | """ 35 | f_ = freq * 2 ** (st / 12) 36 | return note_with_vibrato(freq=f_, duration=2., vibrato_freq=2., 37 | max_pitch_dev=2., 38 | waveform_table=WAVEFORM_TRIANGULAR, 39 | vibrato_waveform_table=WAVEFORM_SINE) 40 | 41 | 42 | def ADV(note_dict={}, adsr_dict={}): 43 | """Apply ADSR envelope to a note with vibrato. 44 | 45 | Args: 46 | note_dict (dict): Dictionary containing parameters for the note. 47 | adsr_dict (dict): Dictionary containing parameters for the ADSR 48 | envelope. 49 | 50 | Returns: 51 | array: Note with applied ADSR envelope. 52 | """ 53 | return adsr(sonic_vector=V_(**note_dict), **adsr_dict) 54 | 55 | 56 | class Being: 57 | def __init__(self): 58 | rhythm = [1.] # repetition of one second 59 | rhythm2 = [1/2, 1/2] # repetition of one second 60 | rhythm3 = [1/3, 1/3, 1/3] # repetition of one second 61 | rhythm4 = [1/4, 1/4, 1/3] # repetition of one second 62 | 63 | # assume duration = 1 (be 1 second, minute or whatnot): 64 | rhythmic_spectrum = [[1. 
/ i] * i for i in range(1, 300)] 65 | 66 | # pitch or frequency sequences (to be used at will) 67 | f = 110 68 | freqs = [220] 69 | freq_spectrum = [i*f for i in range(1, 300)] 70 | neg_spec = [f/i for i in range(2, 300)] 71 | 72 | freq_sym = [[f*2**((i*j)/12) for i in range(j)] for j in [2, 3, 4, 6]] 73 | freq_sym_ = [[f*2**((i*j)/12) for i in range(300)] 74 | for j in [2, 3, 4, 6]] 75 | 76 | dia = [2, 2, 1, 2, 2, 2, 1] 77 | notes_diatonic = [[dia[(j+i) % 7] for i in range(7)] for j in range(7)] 78 | notes_diatonic_ = [sum(notes_diatonic[i]) for i in range(7)] 79 | freq_diatonic = [[f*2**((12 * i + notes_diatonic_[j])/12) 80 | for i in range(30)] for j in range(7)] 81 | 82 | intensity_octaves = [[10**((i*10)/(j*20)) for i in range(300)] 83 | for j in range(1, 20)] # steps of 10db - 1/2 dB 84 | db0 = 10**(-120/20) 85 | intensity_spec = [[db0*i for i in j] for j in intensity_octaves] 86 | 87 | # diatonic noise, noises derived from the symmetric scales etc: one 88 | # sinusoid or other basic waveform in each note. 89 | # Synth on the freq domain to optimize and simplify the process 90 | 91 | # make music of the spheres using ellipses and relations recalling 92 | # gravity 93 | self.resources = locals() 94 | self.startBeing() 95 | 96 | def walk(self, n, method='straight'): 97 | """Walk a certain number of steps. 98 | 99 | Args: 100 | n (int): Number of steps. 101 | method (str): Method of walking. 102 | 103 | Returns: 104 | array: Sequence of steps. 105 | """ 106 | if method == 'straight': 107 | # ** TTM 108 | sequence = [self.grid[self.pointer + i] for i in range(n)] 109 | self.pointer += n 110 | elif method == 'low-high': 111 | sequence = [self.grid[self.pointer + i % (self.seqsize + 1) + i // 112 | self.seqsize] for i in range(n*self.seqsize)] 113 | elif method == 'perm-walk': 114 | # restore walk from 02peal 115 | pass 116 | self.addSeq(sequence) 117 | 118 | def setPar(self, par='f'): 119 | """Set parameter to be developed in walks and stays. 120 | 121 | Args: 122 | par (str): Parameter to be set. 123 | 124 | Returns: 125 | None 126 | """ 127 | if par == 'f': 128 | self.grid = self.fgrid 129 | self.pointer = self.fpointer 130 | 131 | def setSize(self, ss): 132 | """Set the size. 133 | 134 | Args: 135 | ss (int): Size to set. 136 | 137 | Returns: 138 | None 139 | """ 140 | self.seqsize = ss 141 | 142 | def setPerms(self, perms): 143 | """Set permutations. 144 | 145 | Args: 146 | perms (list): List of permutations. 147 | 148 | Returns: 149 | None 150 | """ 151 | self.perms = perms 152 | 153 | def stay(self, n, method='perm'): 154 | """Stay for a certain number of notes. 155 | 156 | Args: 157 | n (int): Number of notes. 158 | method (str): Method of staying. 159 | 160 | Returns: 161 | array: Sequence of stayed notes. 162 | """ 163 | if method == 'straight': 164 | sequence = [self.grid[(self.pointer + i) % self.seqsize] 165 | for i in range(n)] 166 | elif method == 'perm': 167 | # ** TTM 168 | sequence = [] 169 | if not isinstance(self.domain, n_.ndarray): 170 | if not self.domain: 171 | domain = self.grid[self.pointer: self.pointer + 172 | self.seqsize] 173 | else: 174 | domain = n_.array(self.domain) 175 | logging.debug("Implemented OK?? TTM") 176 | else: 177 | domain = self.domain 178 | # nel = self.perms[0].size # should match self.seqsize ? 
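            # Sketch of the loop below: cycle through self.perms, apply each
            # permutation to the domain, concatenate the results until at
            # least n notes are gathered, then truncate the sequence to n.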
179 | count = 0 180 | while len(sequence) < n: 181 | perm = self.perms[count % len(self.perms)] 182 | seq = perm(domain) 183 | sequence.extend(seq) 184 | count += 1 185 | sequence = sequence[:n] 186 | self.addSeq(sequence) 187 | self.total_notes += n 188 | 189 | def addSeq(self, sequence): 190 | """Add sequence to the Being. 191 | 192 | Args: 193 | sequence (array): Sequence to add. 194 | 195 | Returns: 196 | None 197 | """ 198 | if isinstance(self.__dict__[self.curseq], list): 199 | self.__dict__[self.curseq].extend(sequence) 200 | else: 201 | self.__dict__[self.curseq] = \ 202 | horizontal_stack(self.__dict__[self.curseq], sequence) 203 | 204 | def render(self, nn, fn=False): 205 | """Render notes of the Being. 206 | 207 | Args: 208 | nn (int): Number of notes to render. 209 | fn (str or bool): File name to save the rendered notes. 210 | 211 | Returns: 212 | array or None: Rendered notes. 213 | """ 214 | self.mkArray() 215 | ii = n.arange(nn) 216 | duration = self.d_[ii % len(self.d_)]*self.dscale 217 | freq = self.f_[ii % len(self.f_)] 218 | waveform_table = self.tab_[ii % len(self.tab_)] 219 | vibrato_freq = self.fv_[ii % len(self.fv_)] 220 | max_pitch_dev = self.nu_[ii % len(self.nu_)] 221 | A = self.A_[ii % len(self.A_)] 222 | D = self.D_[ii % len(self.D_)] 223 | S = self.S_[ii % len(self.S_)] 224 | R = self.R_[ii % len(self.R_)] 225 | notes = [ADV({'freq': ff, 'duration': dd, 'vibrato_freq': fvv, 226 | 'max_pitch_dev': nuu, 'waveform_table': tabb}, 227 | {'attack_duration': AA, 'decay_duration': DD, 228 | 'sustain_level': SS, 'release_duration': RR}) 229 | for ff, dd, fvv, nuu, tabb, AA, DD, SS, RR 230 | in zip(freq, duration, vibrato_freq, max_pitch_dev, 231 | waveform_table, A, D, S, R)] 232 | if fn: 233 | if not isinstance(fn, str): 234 | fn = 'abeing.wav' 235 | if fn[-4:] != '.wav': 236 | fn += '.wav' 237 | write_wav_mono(horizontal_stack(*notes), fn) 238 | else: 239 | return horizontal_stack(*notes) 240 | 241 | def startBeing(self): 242 | """Start the Being. 243 | 244 | Args: 245 | None 246 | 247 | Returns: 248 | None 249 | """ 250 | self.dscale = 1 251 | self.d_ = [1] 252 | self.f_ = [220] 253 | self.fv_ = [3] 254 | self.nu_ = [1] 255 | self.tab_ = [T.triangle] 256 | self.A_ = [20] 257 | self.D_ = [20] 258 | self.S_ = [-5] 259 | self.R_ = [50] 260 | self.mkArray() 261 | self.total_notes = 0 262 | 263 | def mkArray(self): 264 | """Make array. 265 | 266 | Args: 267 | None 268 | 269 | Returns: 270 | None 271 | """ 272 | self.d_ = n.array(self.d_) 273 | self.f_ = n.array(self.f_) 274 | self.fv_ = n.array(self.fv_) 275 | self.nu_ = n.array(self.nu_) 276 | self.tab_ = n.array(self.tab_) 277 | self.A_ = n.array(self.A_) 278 | self.D_ = n.array(self.D_) 279 | self.S_ = n.array(self.S_) 280 | self.R_ = n.array(self.R_) 281 | 282 | def howl(self): 283 | """Produce a howling sound. 284 | 285 | Args: 286 | None 287 | 288 | Returns: 289 | None 290 | """ 291 | pass 292 | 293 | def freeze(self): 294 | """Freeze the Being. 295 | 296 | Args: 297 | None 298 | 299 | Returns: 300 | None 301 | """ 302 | pass 303 | -------------------------------------------------------------------------------- /music/structures/permutations.py: -------------------------------------------------------------------------------- 1 | """Provides tools for working with interesting permutations. 2 | 3 | This module defines the `InterestingPermutations` class, which facilitates the 4 | generation and manipulation of permutations with specific properties. It also 5 | includes utility functions for permutation operations. 
6 | 7 | Classes: 8 | - InterestingPermutations: Provides tools for generating and manipulating 9 | permutations with specific properties. 10 | 11 | Functions: 12 | - dist: Calculates the distance between elements in a swap permutation. 13 | - transpose_permutation: Transposes a permutation by a specified step. 14 | 15 | Example: 16 | To work with interesting permutations: 17 | 18 | >>> from sympy.combinatorics import Permutation 19 | >>> from sympy.combinatorics.named_groups import AlternatingGroup 20 | >>> interesting_perms = InterestingPermutations(nelements=4, 21 | >>> method="dimino") 22 | >>> print(interesting_perms.alternations) 23 | 24 | """ 25 | from sympy.combinatorics import Permutation 26 | from sympy.combinatorics.named_groups import AlternatingGroup 27 | import sympy 28 | 29 | 30 | class InterestingPermutations: 31 | """Get permutations of n elements in meaningful sequences. 32 | Mirrors are ordered by swaps (0,n-1...). 33 | 34 | Methods: 35 | - get_alternating: Generates permutations in the alternating group. 36 | - get_rotations: Generates rotations of permutations. 37 | - get_mirrors: Generates mirror permutations. 38 | - get_swaps: Generates swap permutations. 39 | - even_odd: Determines if a permutation is even or odd. 40 | - get_full_symmetry: Generates permutations with full symmetry. 41 | """ 42 | def __init__(self, nelements=4, method="dimino"): 43 | self.permutations_by_sizes = None 44 | self.permutations = None 45 | self.neighbor_swaps = None 46 | self.swaps_by_stepsizes = None 47 | self.swaps_as_comes = None 48 | self.vertex_mirrors = None 49 | self.edge_mirrors = None 50 | self.swaps = None 51 | self.rotations = None 52 | self.mirrors = None 53 | self.dihedral = None 54 | self.alternations_by_sizes = None 55 | self.alternations_complement = None 56 | self.alternations = None 57 | self.nelements = nelements 58 | self.neutral_perm = Permutation([0], size=nelements) 59 | self.method = method 60 | self.get_rotations() 61 | self.get_mirrors() 62 | self.get_alternating() 63 | self.get_full_symmetry() 64 | self.get_swaps() 65 | 66 | def get_alternating(self): 67 | """Generates permutations in the alternating group. 68 | 69 | This method generates permutations in the alternating group of the 70 | specified size using the provided generation method. 71 | """ 72 | self.alternations = list(AlternatingGroup(self.nelements). 73 | generate(method=self.method)) 74 | self.alternations_complement = [i for i in self.alternations 75 | if i not in self.dihedral] 76 | length_max = self.nelements 77 | self.alternations_by_sizes = [] 78 | for length in range(0, 1 + length_max): 79 | # while length in [i.length() 80 | # for i in self.alternations_complement]: 81 | self.alternations_by_sizes.append( 82 | [i for i in self.alternations_complement 83 | if i.length() == length]) 84 | 85 | assert len(self.alternations_complement) ==\ 86 | sum([len(i)for i in self.alternations_by_sizes]) 87 | 88 | def get_rotations(self): 89 | """Generates rotations of permutations. 90 | 91 | This method generates rotations of permutations of the specified size 92 | using the provided generation method. 93 | """ 94 | self.rotations = list(sympy.combinatorics.named_groups. 95 | CyclicGroup(self.nelements). 96 | generate(method=self.method)) 97 | 98 | def get_mirrors(self): 99 | """Generates mirror permutations. 100 | 101 | This method generates mirror permutations of the specified size using 102 | the provided generation method. 103 | """ 104 | if self.nelements > 2: # bug in sympy? 
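            # For n > 2 the dihedral group comes from sympy; the two-element
            # case is written out explicitly in the else branch because of
            # the suspected sympy issue noted above.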
105 | self.dihedral = list(sympy.combinatorics.named_groups. 106 | DihedralGroup(self.nelements). 107 | generate(method=self.method)) 108 | else: 109 | self.dihedral = [Permutation([0], size=self.nelements), 110 | Permutation([1, 0], size=self.nelements)] 111 | self.mirrors = [i for i in self.dihedral if i not in self.rotations] 112 | # even elements have edge and vertex mirrors 113 | if self.nelements % 2 == 0: 114 | self.edge_mirrors = [i for i in self.mirrors 115 | if i.length() == self.nelements] 116 | self.vertex_mirrors = [i for i in self.mirrors 117 | if i.length() == self.nelements - 2] 118 | assert len(self.edge_mirrors + self.vertex_mirrors) ==\ 119 | len(self.mirrors) 120 | 121 | def get_swaps(self): 122 | """Generates swap permutations. 123 | 124 | This method generates swap permutations of the specified size using 125 | the provided generation method. 126 | """ 127 | self.swaps = sorted(self.permutations_by_sizes[0], 128 | key=lambda x: -x.rank()) 129 | self.swaps_as_comes = self.permutations_by_sizes[0] 130 | self.swaps_by_stepsizes = [] 131 | self.neighbor_swaps = [sympy.combinatorics. 132 | Permutation(i, i + 1, size=self.nelements) 133 | for i in range(self.nelements - 1)] 134 | dist_ = 1 135 | while dist_ in [dist(i) for i in self.swaps]: 136 | self.swaps_by_stepsizes += [[i for i in self.swaps 137 | if dist(i) == dist_]] 138 | dist_ += 1 139 | 140 | def even_odd(self, sequence): 141 | """Determines if a permutation is even or odd. 142 | 143 | This method determines if a given permutation is even or odd based on 144 | its sequence of elements. 145 | 146 | Parameters: 147 | sequence (list): The sequence of elements representing the 148 | permutation. 149 | 150 | Returns: 151 | str: Either 'even' or 'odd' indicating the parity of the 152 | permutation. 153 | """ 154 | n = len(sequence) 155 | visited = [False] * n 156 | parity = 0 157 | 158 | for i in range(n): 159 | if not visited[i]: 160 | cycle_length = 0 161 | x = i 162 | 163 | while not visited[x]: 164 | visited[x] = True 165 | x = sequence[x] 166 | cycle_length += 1 167 | 168 | if cycle_length > 0: 169 | parity += cycle_length - 1 170 | 171 | return 'even' if parity % 2 == 0 else 'odd' 172 | 173 | def get_full_symmetry(self): 174 | """Generates permutations with full symmetry. 175 | 176 | This method generates permutations with full symmetry of the specified 177 | size using the provided generation method. 178 | """ 179 | self.permutations = list(sympy.combinatorics.named_groups. 180 | SymmetricGroup(self.nelements). 181 | generate(method=self.method)) 182 | # sympy.combinatorics.generators.symmetric(self.nelements) 183 | self.permutations_by_sizes = [] 184 | length = 2 185 | while length in [i.length() for i in self.permutations]: 186 | self.permutations_by_sizes += [[i for i in self.permutations 187 | if i.length() == length]] 188 | length += 1 189 | 190 | 191 | def dist(swap): 192 | """ 193 | Computes the cyclic distance between the two elements of a permutation. 194 | 195 | Parameters 196 | ---------- 197 | swap : sympy.combinatorics.Permutation 198 | A permutation object with exactly two elements in its support. 199 | 200 | Returns 201 | ------- 202 | int 203 | The cyclic distance between the two elements. 204 | 205 | Notes 206 | ----- 207 | The distance is adjusted to account for the circular nature of the 208 | permutation. If the difference is greater than or equal to half the size 209 | of the permutation, the distance is calculated as the size of the 210 | permutation minus the difference. 
211 | 212 | Examples 213 | -------- 214 | >>> from sympy.combinatorics import Permutation 215 | >>> perm = Permutation([1, 0, 2]) 216 | >>> dist(perm) 217 | 1 218 | >>> perm = Permutation([2, 0, 1]) 219 | >>> dist(perm) 220 | 1 221 | """ 222 | if swap.size % 2 == 0: 223 | half = swap.size / 2 224 | else: 225 | half = swap.size // 2 + 1 226 | diff = abs(swap.support()[1] - swap.support()[0]) 227 | if diff >= half: 228 | diff = swap.size - diff 229 | return diff 230 | 231 | 232 | def transpose_permutation(permutation, step=1): 233 | """ 234 | Transposes (shifts) the elements of a permutation by a given step. 235 | 236 | Parameters 237 | ---------- 238 | permutation : sympy.combinatorics.Permutation 239 | The permutation to be transposed. 240 | step : int, optional 241 | The number of positions to shift each element of the permutation, 242 | by default 1. 243 | 244 | Returns 245 | ------- 246 | sympy.combinatorics.Permutation 247 | A new permutation with elements shifted by the specified step. 248 | 249 | Notes 250 | ----- 251 | If `step` is 0, the function returns the original permutation. 252 | 253 | Examples 254 | -------- 255 | >>> from sympy.combinatorics import Permutation 256 | >>> perm = Permutation([2, 0, 1]) 257 | >>> transpose_permutation(perm, 1) 258 | Permutation([3, 1, 2]) 259 | >>> transpose_permutation(perm, 0) 260 | Permutation([2, 0, 1]) 261 | """ 262 | if not step: 263 | return permutation 264 | new_indexes = (i + step for i in permutation.support()) 265 | return sympy.combinatorics.Permutation(*new_indexes) 266 | -------------------------------------------------------------------------------- /music/legacy/CanonicalSynth.py: -------------------------------------------------------------------------------- 1 | """Reference synthesizer demonstrating basic techniques.""" 2 | 3 | import numpy as n 4 | import music as M 5 | 6 | 7 | class CanonicalSynth: 8 | """ 9 | Simple synthesizer for sound synthesis with vibrato, tremolo, and ADSR. 10 | 11 | All functions but absorbState return a sonic array. 12 | You can parametrize the synth in any function call. 13 | If you want to keep some set of states for specific 14 | calls, clone your CanonicalSynth or create a new instance. 15 | You can also pass arbitrary variables to use later on. 16 | 17 | Parameters 18 | ---------- 19 | f : scalar 20 | The frequency of the note in Hertz. 21 | d : scalar 22 | The duration of the note in seconds. 23 | fv : scalar 24 | The frequency of the vibrato oscillations in Hertz. 25 | nu : scalar 26 | The maximum deviation of pitch in the vibrato in semitones. 27 | tab : array_like 28 | The table with the waveform to synthesize the sound. 29 | tabv : array_like 30 | The table with the waveform of the vibrato oscillatory pattern. 31 | 32 | Examples 33 | -------- 34 | >>> cs = CanonicalSynth() 35 | # TODO: develop example 36 | """ 37 | 38 | def __init__(s, **statevars): 39 | """ 40 | Initializes the synthesizer with given state variables. 41 | 42 | Parameters 43 | ---------- 44 | **statevars : dict 45 | Arbitrary keyword arguments for state variables. 46 | """ 47 | s.absorbState(**statevars) 48 | if "tables" not in dir(s): 49 | s.tables = M.legacy.tables.Basic() 50 | if "samplerate" not in dir(s): 51 | s.samplerate = 44100 52 | s.synthSetup() 53 | s.adsrSetup() 54 | 55 | def synthSetup(self, table=None, vibrato_table=None, tremolo_table=None, 56 | vibrato_depth=.1, vibrato_frequency=2., tremolo_depth=3., 57 | tremolo_frequency=0.2, duration=2, 58 | fundamental_frequency=220): 59 | """ 60 | Setup synth engine. 
ADSR is configured separately. 61 | 62 | Parameters 63 | ---------- 64 | table : array_like, optional 65 | The waveform table for sound synthesis, by default None. 66 | vibrato_table : array_like, optional 67 | The waveform table for vibrato oscillatory pattern, by default 68 | None. 69 | tremolo_table : array_like, optional 70 | The waveform table for tremolo oscillatory pattern, by default 71 | None. 72 | vibrato_depth : float, optional 73 | The depth of vibrato in semitones, by default 0.1. 74 | vibrato_frequency : float, optional 75 | The frequency of the vibrato oscillations in Hertz, by default 2.0. 76 | tremolo_depth : float, optional 77 | The depth of tremolo in decibels, by default 3.0. 78 | tremolo_frequency : float, optional 79 | The frequency of the tremolo oscillations in Hertz, by default 0.2. 80 | duration : float, optional 81 | The duration of the note in seconds, by default 2. 82 | fundamental_frequency : float, optional 83 | The fundamental frequency of the note in Hertz, by default 220. 84 | """ 85 | if not table: 86 | table = self.tables.triangle 87 | if vibrato_depth and vibrato_frequency: 88 | vibrato = True 89 | if not vibrato_table: 90 | vibrato_table = self.tables.sine 91 | else: 92 | vibrato = False 93 | if tremolo_depth and tremolo_frequency: 94 | tremolo = True 95 | if not tremolo_table: 96 | tremolo_table = self.tables.sine 97 | else: 98 | tremolo = False 99 | locals_ = locals().copy() 100 | del locals_["self"] 101 | for i in locals_: 102 | exec("self.{}={}".format(i, i)) 103 | 104 | def adsrSetup(self, A=100., D=40, S=-5., R=50, render_note=False, 105 | adsr_method="absolute"): 106 | """ 107 | Setup ADSR parameters. 108 | 109 | Parameters 110 | ---------- 111 | A : float, optional 112 | Attack time in milliseconds, by default 100. 113 | D : int, optional 114 | Decay time in milliseconds, by default 40. 115 | S : float, optional 116 | Sustain level in decibels, by default -5. 117 | R : int, optional 118 | Release time in milliseconds, by default 50. 119 | render_note : bool, optional 120 | Whether to render the note immediately, by default False. 121 | adsr_method : str, optional 122 | The ADSR method, by default "absolute". 123 | """ 124 | adsr_method = adsr_method # implement relative and False 125 | a_S = 10 ** (S / 20.) 126 | Lambda_A = int(A * self.samplerate * 0.001) 127 | Lambda_D = int(D * self.samplerate * 0.001) 128 | Lambda_R = int(R * self.samplerate * 0.001) 129 | 130 | ii = n.arange(Lambda_A, dtype=n.float64) 131 | A_ = ii / (Lambda_A - 1) 132 | A_i = n.copy(A_) 133 | ii = n.arange(Lambda_A, Lambda_D + Lambda_A, dtype=n.float64) 134 | D = 1 - (1 - a_S) * ((ii - Lambda_A) / (Lambda_D - 1)) 135 | D_i = n.copy(D) 136 | R = a_S * n.linspace(1, 0, Lambda_R) 137 | R_i = n.copy(R) 138 | locals_ = locals().copy() 139 | del locals_["self"] 140 | for i in locals_: 141 | exec("self.{}={}".format(i, i)) 142 | 143 | def adsrApply(self, audio_vec): 144 | """ 145 | Apply ADSR envelope to the audio vector. 146 | 147 | Parameters 148 | ---------- 149 | audio_vec : array_like 150 | Input audio vector. 151 | 152 | Returns 153 | ------- 154 | array_like 155 | Audio vector with applied ADSR envelope. 156 | """ 157 | Lambda = len(audio_vec) 158 | S = n.ones(Lambda - self.Lambda_R - (self.Lambda_A + self.Lambda_D), 159 | dtype=n.float64) * self.a_S 160 | envelope = n.hstack((self.A_i, self.D_i, S, self.R_i)) 161 | return envelope * audio_vec 162 | 163 | def render(self, **statevars): 164 | """ 165 | Render a note with given parameters. 
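
        The raw waveform and the tremolo envelope are generated, multiplied
        together, and the ADSR envelope is then applied to the product.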
166 | 167 | Parameters 168 | ---------- 169 | **statevars : dict 170 | Arbitrary keyword arguments for state variables. 171 | 172 | Returns 173 | ------- 174 | array_like 175 | Rendered audio vector. 176 | """ 177 | self.absorbState(**statevars) 178 | tremolo_envelope = self.tremoloEnvelope() 179 | note = self.rawRender() 180 | note = note * tremolo_envelope 181 | note = self.adsrApply(note) 182 | return note 183 | 184 | def tremoloEnvelope(self, sonic_vector=None, **statevars): 185 | """ 186 | Calculate the tremolo envelope. 187 | 188 | Parameters 189 | ---------- 190 | sonic_vector : array_like, optional 191 | Input sonic vector, by default None. 192 | **statevars : dict 193 | Arbitrary keyword arguments for state variables. 194 | 195 | Returns 196 | ------- 197 | array_like 198 | Tremolo envelope. 199 | """ 200 | self.absorbState(**statevars) 201 | if sonic_vector: 202 | Lambda = len(sonic_vector) 203 | else: 204 | Lambda = n.floor(self.samplerate * self.duration) 205 | ii = n.arange(Lambda) 206 | Lt = len(self.tremolo_table) 207 | Gammaa_i = n.floor(ii * self.tremolo_frequency * Lt / 208 | self.samplerate) 209 | Gammaa_i = n.array(Gammaa_i, n.int64) 210 | A_i = self.tremolo_table[Gammaa_i % Lt] 211 | A_i = 10. ** ((self.tremolo_depth / 20.) * A_i) 212 | if sonic_vector is not None: 213 | return A_i * sonic_vector 214 | else: 215 | return A_i 216 | 217 | def absorbState(s, **statevars): 218 | """ 219 | Absorb state variables. 220 | 221 | Parameters 222 | ---------- 223 | **statevars : dict 224 | Arbitrary keyword arguments for state variables. 225 | """ 226 | for varname in statevars: 227 | s.__dict__[varname] = statevars[varname] 228 | 229 | def rawRender(self, **statevars): 230 | """ 231 | Render the sound without applying ADSR. 232 | 233 | Parameters 234 | ---------- 235 | **statevars : dict 236 | Arbitrary keyword arguments for state variables. 237 | 238 | Returns 239 | ------- 240 | array_like 241 | Rendered audio vector. 242 | """ 243 | self.absorbState(**statevars) 244 | Lambda = n.floor(self.samplerate * self.duration) 245 | ii = n.arange(Lambda) 246 | Lv = len(self.vibrato_table) 247 | Gammav_i = n.floor(ii * self.vibrato_frequency * Lv / 248 | self.samplerate) 249 | Gammav_i = n.array(Gammav_i, n.int64) 250 | Tv_i = self.vibrato_table[Gammav_i % Lv] 251 | F_i = self.fundamental_frequency * (2. ** 252 | (Tv_i * self.vibrato_depth / 12.)) 253 | Lt = len(self.table) 254 | D_gamma_i = F_i * (Lt / self.samplerate) 255 | Gamma_i = n.cumsum(D_gamma_i) 256 | Gamma_i = n.floor(Gamma_i) 257 | Gamma_i = n.array(Gamma_i, dtype=n.int64) 258 | return self.table[Gamma_i % int(Lt)] 259 | 260 | def render2(self, **statevars): 261 | """ 262 | Render the sound and apply ADSR. 263 | 264 | Parameters 265 | ---------- 266 | **statevars : dict 267 | Arbitrary keyword arguments for state variables. 268 | 269 | Returns 270 | ------- 271 | array_like 272 | Rendered audio vector with applied ADSR envelope. 273 | """ 274 | self.absorbState(**statevars) 275 | Lambda = n.floor(self.samplerate * self.duration) 276 | ii = n.arange(Lambda) 277 | Lv = len(self.vibrato_table) 278 | Gammav_i = n.floor(ii * self.vibrato_frequency * Lv / 279 | self.samplerate) 280 | Gammav_i = n.array(Gammav_i, n.int64) 281 | Tv_i = self.vibrato_table[Gammav_i % Lv] 282 | F_i = self.fundamental_frequency * (2. 
** 283 | (Tv_i * self.vibrato_depth / 12.)) 284 | Lt = self.tables.size 285 | D_gamma_i = F_i * (Lt / self.samplerate) 286 | Gamma_i = n.cumsum(D_gamma_i) 287 | Gamma_i = n.floor(Gamma_i) 288 | Gamma_i = n.array(Gamma_i, dtype=n.int64) 289 | sound = self.table[Gamma_i % int(Lt)] 290 | sound = self.adsrApply(sound) 291 | return sound 292 | -------------------------------------------------------------------------------- /music/core/synths/envelopes.py: -------------------------------------------------------------------------------- 1 | """Synthesis envelopes such as AM and tremolo.""" 2 | 3 | import numpy as np 4 | from music.utils import WAVEFORM_SINE, WAVEFORM_TRIANGULAR 5 | 6 | 7 | def am(duration=2, fm=50, max_amplitude=.4, waveform_table=WAVEFORM_SINE, 8 | number_of_samples=0, sonic_vector=0, sample_rate=44100): 9 | """ 10 | Synthesize an AM envelope or apply it to a sound. 11 | 12 | Set fm=0 or max_amplitude=0 for a constant envelope with value 1. An AM is 13 | a linear oscillatory pattern of amplitude [1]. 14 | 15 | Parameters 16 | ---------- 17 | duration : scalar 18 | The duration of the envelope in seconds. 19 | fm : scalar 20 | The frequency of the modulation in Hertz. 21 | max_amplitude : scalar 22 | The maximum deviation of amplitude of the AM. 23 | waveform_table : array_like 24 | The table with the waveform for the tremolo oscillatory pattern. 25 | number_of_samples : integer 26 | The number of samples of the envelope. If supplied, d is ignored. 27 | sonic_vector : array_like 28 | Samples for the tremolo to be applied to. 29 | If supplied, d and nsamples are ignored. 30 | sample_rate : integer 31 | The sample rate. 32 | 33 | Returns 34 | ------- 35 | t : ndarray 36 | A numpy array where each value is a PCM sample of the envelope. If 37 | sonic_vectoris input, T is the sonic vector with the AM applied to it. 38 | 39 | See Also 40 | -------- 41 | note_with_vibrato : A musical note with an oscillation of pitch. 42 | fm : A linear oscillation of fundamental frequency. 43 | tremolo : A tremolo, an oscillation of loudness. 44 | 45 | Examples 46 | -------- 47 | >>> W(V()*am()) # writes a WAV file of a note with tremolo 48 | >>> s = H([V()*am(fm=i, a=j) for i, j in zip([60, 150, 100], 49 | [2, 1, 20])]) 50 | >>> s = H([am(fm=i, a=j, sonic_vector=V()) for i, j in zip([60, 150, 100], 51 | [2, 1, 20])]) 52 | >>> envelope2 = am(440, 150, 60) # a lengthy envelope 53 | 54 | Notes 55 | ----- 56 | In the MASS framework implementation, for obtaining a sound with a tremolo 57 | (or AM), the tremolo pattern is considered separately from a synthesis of 58 | the sound. 59 | 60 | The AM is an oscilattory pattern of amplitude while the tremolo is an 61 | oscilattory pattern of loudness being: loudness ~ log(amplitude) 62 | 63 | The vibrato and FM patterns are considered when synthesizing the sound. 64 | 65 | One might want to run this function twice to obtain a stereo reverberation. 66 | 67 | Cite the following article whenever you use this function. 68 | 69 | References 70 | ---------- 71 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 72 | representation of sound." 
arXiv preprint arXiv:abs/1412.6853 (2017) 73 | 74 | """ 75 | 76 | waveform_table = np.array(waveform_table) 77 | if type(sonic_vector) in (np.ndarray, list): 78 | lambda_am = len(sonic_vector) 79 | elif number_of_samples: 80 | lambda_am = number_of_samples 81 | else: 82 | lambda_am = np.floor(sample_rate * duration) 83 | samples = np.arange(lambda_am) 84 | 85 | length = len(waveform_table) 86 | # indexes for LUT 87 | gamma_am = (samples * fm * length / sample_rate).astype(np.int64) 88 | # amplitude variation at each sample 89 | t_am = waveform_table[gamma_am % length] 90 | t = 1 + t_am * max_amplitude 91 | if type(sonic_vector) in (np.ndarray, list): 92 | return t * sonic_vector 93 | else: 94 | return t 95 | 96 | 97 | def tremolo(duration=2, tremolo_freq=2, max_db_dev=10, alpha=1, 98 | waveform_table=WAVEFORM_SINE, number_of_samples=0, 99 | sonic_vector=0, sample_rate=44100): 100 | """ 101 | Synthesize a tremolo envelope or apply it to a sound. 102 | 103 | Set fa=0 or dB=0 for a constant envelope with value 1. A tremolo is an 104 | oscillatory pattern of loudness [1]. 105 | 106 | Parameters 107 | ---------- 108 | duration : scalar 109 | The duration of the envelope in seconds. 110 | tremolo_freq : scalar 111 | The frequency of the tremolo oscillations in Hertz. 112 | max_db_dev : scalar 113 | The maximum deviation of loudness in the tremolo in decibels. 114 | alpha : scalar 115 | An index to distort the tremolo pattern [1]. 116 | waveform_table : array_like 117 | The table with the waveform for the tremolo oscillatory pattern. 118 | number_of_samples : integer 119 | The number of samples of the envelope. If supplied, d is ignored. 120 | sonic_vector : array_like 121 | Samples for the tremolo to be applied to. 122 | If supplied, d and nsamples are ignored. 123 | sample_rate : integer 124 | The sample rate. 125 | 126 | Returns 127 | ------- 128 | t : ndarray 129 | A numpy array where each value is a PCM sample of the envelope. 130 | If sonic_vector is input, t is the sonic vector with the tremolo 131 | applied to it. 132 | 133 | See Also 134 | -------- 135 | note_with_vibrato : A musical note with an oscillation of pitch. 136 | fm : A linear oscillation of fundamental frequency. 137 | am : A linear oscillation of amplitude. 138 | 139 | Examples 140 | -------- 141 | >>> W(V()*t()) # writes a WAV file of a note with tremolo 142 | >>> s = H([V()*t(fa=i, dB=j) for i, j in zip([6, 15, 100], [2, 1, 20])]) 143 | >>> s = H([t(fa=i, dB=j, sonic_vector=V()) for i, j in zip([6, 15, 100], 144 | [2, 1, 20])]) 145 | >>> envelope2 = t(440, 1.5, 60) # a lengthy envelope 146 | 147 | Notes 148 | ----- 149 | In the MASS framework implementation, for obtaining a sound with a tremolo 150 | (or AM), the tremolo pattern is considered separately from a synthesis of 151 | the sound. 152 | 153 | The vibrato and FM patterns are considering when synthesizing the sound. 154 | 155 | Cite the following article whenever you use this function. 156 | 157 | See the envelopes.am function. 158 | 159 | References 160 | ---------- 161 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 162 | representation of sound." 
arXiv preprint arXiv:abs/1412.6853 (2017) 163 | 164 | """ 165 | 166 | waveform_table = np.array(waveform_table) 167 | if type(sonic_vector) in (np.ndarray, list): 168 | lambda_tremolo = len(sonic_vector) 169 | elif number_of_samples: 170 | lambda_tremolo = number_of_samples 171 | else: 172 | lambda_tremolo = np.floor(sample_rate * duration) 173 | samples = np.arange(lambda_tremolo) 174 | 175 | length = len(waveform_table) 176 | # indexes for LUT 177 | gamma_tremolo = (samples * tremolo_freq * length / 178 | sample_rate).astype(np.int64) 179 | # amplitude variation at each sample 180 | table_amp = waveform_table[gamma_tremolo % length] 181 | if alpha != 1: 182 | t = 10. ** ((table_amp * max_db_dev / 20) ** alpha) 183 | else: 184 | t = 10. ** (table_amp * max_db_dev / 20) 185 | if type(sonic_vector) in (np.ndarray, list): 186 | return t * sonic_vector 187 | else: 188 | return t 189 | 190 | 191 | def tremolos(durations=((3, 4, 5), (2, 3, 7, 4)), 192 | tremolo_freqs=((2, 6, 20), (5, 6.2, 21, 5)), 193 | max_db_devs=((10, 20, 1), (5, 7, 9, 2)), 194 | alpha=((1, 1, 1), (1, 1, 1, 9)), 195 | waveform_tables=((WAVEFORM_SINE, WAVEFORM_SINE, WAVEFORM_SINE), 196 | (WAVEFORM_TRIANGULAR, WAVEFORM_TRIANGULAR, 197 | WAVEFORM_TRIANGULAR, WAVEFORM_SINE)), 198 | number_of_samples=0, sonic_vector=0, sample_rate=44100): 199 | """ 200 | An envelope with multiple tremolos. 201 | 202 | Parameters 203 | ---------- 204 | durations : iterable of iterable of scalars 205 | the durations of each tremolo. 206 | tremolo_freqs : iterable of iterable of scalars 207 | The frequencies of each tremolo. 208 | max_db_devs : iterable of iterable of scalars 209 | The maximum loudness variation 210 | of each tremolo. 211 | alpha : iterable of iterable of scalars 212 | Indexes for distortion of each tremolo [1]. 213 | waveform_tables : iterable of iterable of array_likes 214 | Tables for lookup for each tremolo. 215 | number_of_samples : iterable of iterable of scalars 216 | The number of samples or each tremolo. 217 | sonic_vector : array_like 218 | The sound to which apply the tremolos. If supplied, the tremolo lines 219 | are applied to the sound and missing samples are completed by zeros 220 | (if sonic_vector is smaller then the lengthiest tremolo) or ones 221 | (is sonic_vector is larger). 222 | sample_rate : integer 223 | The sample rate 224 | 225 | Returns 226 | ------- 227 | s : ndarray 228 | A numpy array where each value is a value of the envelope for the PCM 229 | samples. If sonic_vector is supplied, e is the sonic vector with the 230 | envelope applied to it. 231 | 232 | See Also 233 | -------- 234 | loud : An envelope for a loudness transition. 235 | louds : An envelope with an arbitrary number of transitions. 236 | fade : Fade in and out. 237 | adsr : An ADSR envelope. 238 | tremolo : An oscillation of loudness. 239 | 240 | Examples 241 | -------- 242 | >>> W(V(d=8)*L_()) # writes a WAV file with a loudness transitions 243 | 244 | Notes 245 | ----- 246 | Cite the following article whenever you use this function. 247 | 248 | References 249 | ---------- 250 | .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time 251 | representation of sound." 
arXiv preprint arXiv:abs/1412.6853 (2017) 252 | 253 | 254 | """ 255 | waveform_tables = [ 256 | [np.array(wt) for wt in row] 257 | for row in waveform_tables 258 | ] 259 | t_ = [] 260 | if number_of_samples: 261 | for i, ns in enumerate(number_of_samples): 262 | t_.append([]) 263 | for j, ns_ in enumerate(ns): 264 | s = tremolo(tremolo_freq=tremolo_freqs[i][j], 265 | max_db_dev=max_db_devs[i][j], alpha=alpha[i][j], 266 | waveform_table=waveform_tables[i][j], 267 | number_of_samples=ns_, sample_rate=sample_rate) 268 | t_[-1].append(s) 269 | else: 270 | for i, durs in enumerate(durations): 271 | t_.append([]) 272 | for j, dur in enumerate(durs): 273 | s = tremolo(dur, tremolo_freqs[i][j], max_db_devs[i][j], 274 | alpha[i][j], waveform_table=waveform_tables[i][j], 275 | sample_rate=sample_rate) 276 | t_[-1].append(s) 277 | amax = 0 278 | if type(sonic_vector) in (np.ndarray, list): 279 | amax = len(sonic_vector) 280 | for i in range(len(t_)): 281 | t_[i] = np.hstack(t_[i]) 282 | amax = max(amax, len(t_[i])) 283 | for i in range(len(t_)): 284 | if len(t_[i]) < amax: 285 | t_[i] = np.hstack((t_[i], np.ones(amax - len(t_[i])) * t_[i][-1])) 286 | if type(sonic_vector) in (np.ndarray, list): 287 | if len(sonic_vector) < amax: 288 | sonic_vector = np.hstack((sonic_vector, 289 | np.zeros(amax - len(sonic_vector)))) 290 | t_.append(sonic_vector) 291 | s = np.prod(t_, axis=0) 292 | return s 293 | -------------------------------------------------------------------------------- /music/legacy/pieces/testSong2.py: -------------------------------------------------------------------------------- 1 | """Demonstration song used to exercise the legacy synthesizer.""" 2 | 3 | import music as M 4 | import numpy as np 5 | synth = M.legacy.CanonicalSynth 6 | 7 | 8 | class TestSong2: 9 | """ 10 | Class for testing the CanonicalSynth by generating various sounds and 11 | saving them to WAV files. 12 | 13 | Examples 14 | -------- 15 | >>> test_song = TestSong2() 16 | # TODO: Add more examples 17 | """ 18 | def __init__(self): 19 | """ 20 | Initializes the TestSong2 instance and generates various sounds using 21 | the CanonicalSynth. 22 | """ 23 | global M 24 | 25 | note = synth.render() 26 | M.core.io.write_wav_mono(note) # saved to fooname.wav 27 | 28 | note = synth.rawRender() 29 | note2 = synth.rawRender(duration=4.) 30 | # test: rawRender + applyAdsr 31 | note3 = synth.rawRender(duration=4., vibrato_frequency=4.) 32 | note4 = synth.rawRender(duration=4., vibrato_frequency=4., 33 | vibrato_depth=8.) 34 | note5 = synth.rawRender(duration=4., vibrato_frequency=1., 35 | vibrato_depth=8.) 36 | note6 = synth.rawRender(duration=4., vibrato_frequency=0.5, 37 | vibrato_depth=8.) 38 | notes = [note, note2, note3, note4, note5, note6] 39 | notes = [synth.adsrApply(i) for i in notes] 40 | vibratos = np.hstack(notes) 41 | # saved to vibratos.wav 42 | M.core.io.write_wav_mono(vibratos, "vibratos.wav") 43 | # tremoloEnvelope 44 | note6 = synth.rawRender(duration=4., vibrato_frequency=0.) 45 | note7 = synth.rawRender(fundamental_frequency=440., duration=4., 46 | vibrato_frequency=0.) 47 | te = synth.tremoloEnvelope(duration=4.) 48 | te2 = synth.tremoloEnvelope(duration=4.) 49 | te3 = synth.tremoloEnvelope(tremolo_depth=20, duration=4.) 
50 | te4 = synth.tremoloEnvelope(duration=4., 51 | tremolo_table=synth.tables.triangle) 52 | 53 | notes = [note6 * te, note6 * te2, note6 * te3, note6 * te4] 54 | notes = [synth.adsrApply(i) for i in notes] 55 | tremolos = np.hstack(notes) 56 | # saved to tremolos.wav 57 | M.core.io.write_wav_mono(tremolos, "tremolos.wav") 58 | 59 | # tremolog + envelope 60 | R = synth.rawRender 61 | T = synth.tremoloEnvelope 62 | A = synth.adsrApply 63 | # == T(duration=4.) * R(duration=4.) 64 | notes = [T(sounduration=R(duration=4.)), 65 | T(duration=4.) * R(duration=4.), # sould sound the same 66 | T(tre_freq=4., duration=4.) * R(duration=4.), 67 | T(tre_freq=2., duration=4.) * R(duration=4., 68 | vibrato_frequency=4.), 69 | T(tre_freq=4., duration=4.) * R(duration=4., 70 | vibrato_frequency=4.), 71 | T(tre_freq=8., duration=4.) * R(duration=4., 72 | vibrato_frequency=4.), 73 | T(tre_freq=4., duration=4.) * R(duration=4., 74 | vibrato_frequency=8.)] 75 | notes = [synth.adsrApply(i) for i in notes] 76 | tremolos = np.hstack(notes) 77 | M.core.io.write_wav_mono(tremolos, "TV.wav") # saved to fooname.wav 78 | 79 | f0 = 220. 80 | M_ = M.utils.midi_to_hz_interval 81 | H = np.hstack 82 | R = synth.render2 83 | notes_ = [ 84 | T(tremolo_depth=2., duration=4.) * R(fundamental_frequency=f0 * 85 | M_(7), duration=4., 86 | vibrato_frequency=4.) + 87 | T(tremolo_depth=2., duration=4.) * R(fundamental_frequency=f0, 88 | duration=4., 89 | vibrato_frequency=4.), 90 | 91 | T(tremolo_depth=4., duration=4.) * 92 | H((R(fundamental_frequency=f0 * M_(7), duration=2., 93 | vibrato_frequency=4.), 94 | R(fundamental_frequency=f0 * M_(7), duration=2., 95 | vibrato_frequency=4.))) + 96 | T(tremolo_depth=4., duration=4.) * 97 | R(fundamental_frequency=f0, duration=4., vibrato_frequency=4.), 98 | 99 | T(tremolo_depth=2., duration=4.) * 100 | R(fundamental_frequency=f0 * M_(7), duration=4., 101 | vibrato_frequency=4.) + 102 | T(tremolo_depth=2., duration=4.) * 103 | R(fundamental_frequency=f0, duration=4., vibrato_frequency=4.), 104 | 105 | T(tremolo_depth=4., duration=4.) * 106 | R(fundamental_frequency=f0 * M_(7), duration=4., 107 | vibrato_frequency=2.) + 108 | T(tremolo_depth=4., duration=4.) * 109 | R(fundamental_frequency=f0, duration=4., vibrato_frequency=2.), 110 | 111 | T(tremolo_depth=6., duration=4.) * 112 | R(fundamental_frequency=f0 * M_(7), duration=4., 113 | vibrato_frequency=4.) + 114 | T(tremolo_depth=8., duration=4.) * 115 | R(fundamental_frequency=f0, duration=4., vibrato_frequency=2.), 116 | 117 | T(tremolo_depth=6., duration=4.) * 118 | R(fundamental_frequency=f0 * M_(-7), duration=4., 119 | vibrato_frequency=4.) + 120 | T(tremolo_depth=8., duration=4.) * 121 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 122 | R(fundamental_frequency=f0, duration=2., 123 | vibrato_frequency=4.))), 124 | 125 | T(tremolo_depth=6., duration=4.) * 126 | R(fundamental_frequency=f0 * M_(-7), duration=4., 127 | vibrato_frequency=8.) + 128 | T(tremolo_depth=8., duration=4.) * 129 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 130 | R(fundamental_frequency=f0, duration=2., 131 | vibrato_frequency=8.))), 132 | 133 | T(tremolo_depth=8., duration=4.) * 134 | R(fundamental_frequency=f0 * M_(-7), duration=4., 135 | vibrato_frequency=8.) + 136 | T(tremolo_depth=8., duration=4.) * 137 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 138 | R(fundamental_frequency=f0, duration=2., 139 | vibrato_frequency=6.))), 140 | 141 | T(tremolo_depth=.5, duration=4.) 
* 142 | R(fundamental_frequency=f0 * M_(-7), duration=4., 143 | vibrato_frequency=1.) + 144 | T(tremolo_depth=.5, duration=4.) * 145 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 146 | R(fundamental_frequency=f0, duration=2., 147 | vibrato_frequency=6.))), 148 | 149 | T(tremolo_depth=1., duration=4.) * 150 | R(fundamental_frequency=f0 * M_(-5), duration=4., 151 | vibrato_frequency=1.) + 152 | T(tremolo_depth=.5, duration=4.) * 153 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 154 | R(fundamental_frequency=f0 * M_(2), duration=2., 155 | vibrato_frequency=6.))), 156 | 157 | T(tremolo_depth=2., duration=4.) * 158 | R(fundamental_frequency=f0 * M_(-5), duration=4., 159 | vibrato_frequency=1.) + 160 | T(tremolo_depth=1., duration=4.) * 161 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 162 | R(fundamental_frequency=f0 * M_(2.2), duration=2., 163 | vibrato_frequency=8.))), 164 | 165 | T(tremolo_depth=2., duration=4.) * 166 | R(fundamental_frequency=f0 * M_(5), duration=4., 167 | vibrato_frequency=2.) + 168 | T(tremolo_depth=2., duration=4.) * 169 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 170 | R(fundamental_frequency=f0 * M_(0.2), duration=2., 171 | vibrato_frequency=8.))), 172 | 173 | T(tremolo_depth=2., duration=4.) * 174 | R(fundamental_frequency=f0 * M_(5.2), duration=4., 175 | vibrato_frequency=2.) + 176 | T(tremolo_depth=2., duration=4.) * 177 | H((R(fundamental_frequency=f0, duration=2., vibrato_frequency=2.), 178 | R(fundamental_frequency=f0 * M_(-.2), duration=2., 179 | vibrato_frequency=8.))), 180 | 181 | T(tremolo_depth=2., duration=4.) * 182 | R(fundamental_frequency=f0 * M_(7.2), duration=4., 183 | vibrato_frequency=2.) + 184 | T(tremolo_depth=2., duration=4.) * 185 | H((R(fundamental_frequency=f0 * M_(-1), duration=2., 186 | vibrato_frequency=2.), R(fundamental_frequency=f0 * 187 | M_(-.2), duration=2., 188 | vibrato_frequency=8.))), 189 | 190 | T(tremolo_depth=2., duration=4.) * 191 | R(fundamental_frequency=f0 * M_(12.), duration=4., 192 | vibrato_frequency=2.) + 193 | T(tremolo_depth=2., duration=4.) * 194 | H((R(fundamental_frequency=f0 * M_(0), duration=2., 195 | vibrato_frequency=2.), R(fundamental_frequency=f0 * 196 | M_(4.2), duration=2., 197 | vibrato_frequency=6.))), 198 | 199 | T(tremolo_depth=2., duration=4.) * 200 | R(fundamental_frequency=f0 * M_(12.), duration=4., 201 | vibrato_frequency=2.) + 202 | T(tremolo_depth=2., duration=4.) * 203 | H((R(fundamental_frequency=f0 * M_(-12), duration=2., 204 | vibrato_frequency=2.), R(fundamental_frequency=f0, 205 | duration=2., 206 | vibrato_frequency=4.))), 207 | 208 | T(tremolo_depth=2., duration=4.) * 209 | R(fundamental_frequency=f0 * M_(7.), duration=4., 210 | vibrato_frequency=4.) + 211 | T(tremolo_depth=2., duration=4.) * 212 | H((R(fundamental_frequency=f0 * 213 | M_(-12), duration=2., vibrato_frequency=2.), 214 | R(fundamental_frequency=f0, duration=2., 215 | vibrato_frequency=4.))), 216 | ] 217 | 218 | # notes_=[synth.adsrApply(i) for i in notes_] 219 | # 220 | # == T(duration=4.) * R(duration=4.) 221 | # notes=[T(sounduration=R(duration=4.)), 222 | # T(duration=4.) * R(duration=4.), # sould sound the same 223 | # T(tre_freq=4., duration=4.) * R(duration=4.), 224 | # T(tre_freq=2., duration=4.) * R(duration=4., vibrato_frequency=4.), 225 | # T(tre_freq=4., duration=4.) * R(duration=4., vibrato_frequency=4.), 226 | # T(tre_freq=8., duration=4.) * R(duration=4., vibrato_frequency=4.), 227 | # T(tre_freq=4., duration=4.) 
* R(duration=4., 228 | # vibrato_frequency=8.)][::-1] 229 | # 230 | # notes=[synth.adsrApply(i) for i in notes] 231 | # 232 | # vibrosong=H(notes_+notes) 233 | locals_ = locals().copy() 234 | del locals_["self"] 235 | for i in locals_: 236 | exec("self.{}={}".format(i, i)) 237 | 238 | def render(self): 239 | A = synth.adsrApply 240 | H = np.hstack 241 | synth.adsrSetup(0, 0, 0, 3000) 242 | vibrosong = A(H(self.notes_ + [np.zeros(44100)])) 243 | M.core.io.write_wav_mono(vibrosong, "vibrosong.wav") 244 | -------------------------------------------------------------------------------- /music/core/filters/localization.py: -------------------------------------------------------------------------------- 1 | """Stereo localization filters and related helpers.""" 2 | 3 | import numpy as np 4 | import warnings 5 | from music.core.synths.notes import note, note_with_phase 6 | from music.utils import WAVEFORM_SINE 7 | 8 | 9 | def localize(sonic_vector=note(), theta=0, distance=0, x=.1, y=.01, 10 | zeta=0.215, air_temp=20, sample_rate=44100): 11 | """ 12 | Make a mono sound stereo and localize it by a very naive method. 13 | 14 | See bellow for implementation notes. 15 | 16 | Parameters 17 | ---------- 18 | sonic_vector : array_like 19 | An one dimensional with the PCM samples of the sound. 20 | x : scalar 21 | The lateral component of the position in meters. 22 | y : scalar 23 | The frontal component of the position in meters. 24 | theta : scalar 25 | The azimuthal angle of the position in degrees. If theta is supplied, 26 | x and y are ignored and dist must also be supplied for the sound 27 | localization to have effect. 28 | distance : scalar 29 | The distance of the source from the listener in meters. 30 | zeta : scalar 31 | The distance between the ears in meters. 32 | air_temp : scalar 33 | The temperature in Celsius used for calculating the speed of sound. 34 | sample_rate : integer 35 | The sample rate. 36 | 37 | Returns 38 | ------- 39 | s : ndarray 40 | A (2, nsamples) shaped array with the PCM samples of the stereo sound. 41 | 42 | See Also 43 | -------- 44 | reverb : A reverberator. 45 | localize2 : a less naive implementation of localization by ITD and IID. 46 | # FIXME: hrtf? 47 | hrtf : performs localization by means of a Head Related Transfer Function. 48 | 49 | Examples 50 | -------- 51 | >>> write_wav_stereo(localize()) 52 | >>> write_wav_stereo(horizontal_stack([ 53 | ... localize(note_with_vibrato(duration=1), x=i, y=j) 54 | ... for i, j in zip([.1, .7, np.pi - .1, np.pi - .7], 55 | ... [.1, .1, .1, .1])])) 56 | 57 | 58 | Notes 59 | ----- 60 | Uses the most naive ITD and IID calculations as described in [1]. A less 61 | naive method is implemented in localize2(). Nonetheless, if dist is small 62 | enough (e.g. <.3), the perception of theta occurs and might be used. 63 | The advantages of this method are: 64 | - It is fast. 65 | - It is simple. 66 | - It is true to sound propagation phenomenon (although it does not 67 | consider the human body beyond the localization of the ears). 68 | - It can be used easily for tweaks (such as for a moving source 69 | resulting in a Doppler Effect). 70 | 71 | When az = tan^{-1}(y/x) lies in the 'cone of confusion', many values of x 72 | and y have the same ITD and IID [1]. Furthermore, lateral sources have the 73 | low frequencies diffracted and reach the opposite ear with a delay of 74 | ~0.7s [1]. The height of a source and if it is in front or behind a 75 | listener are cues given by the HRTF [1]. 
These issues are not taken into
76 |     account in this function.
77 |
78 |     The value of zeta is ~0.215 for adult humans [1].
79 |
80 |     This implementation assumes that the speed of sound (in air) is
81 |     s = 331.3 + 0.606 * air_temp.
82 |
83 |     Cite the following article whenever you use this function.
84 |
85 |     References
86 |     ----------
87 |     .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time
88 |            representation of sound." arXiv preprint arXiv:1412.6853 (2017)
89 |
90 |     """
91 |     if theta:
92 |         theta = 2 * np.pi * theta / 360
93 |         x = np.cos(theta) * distance
94 |         y = np.sin(theta) * distance
95 |     speed = 331.3 + .606 * air_temp
96 |
97 |     dr = np.sqrt((x - zeta / 2) ** 2 + y ** 2)  # distance from right ear
98 |     dl = np.sqrt((x + zeta / 2) ** 2 + y ** 2)  # distance from left ear
99 |
100 |     iid_a = dr / dl  # proportion of amplitudes from left to right ear
101 |     itd = (dl - dr) / speed  # seconds
102 |     lambda_itd = int(itd * sample_rate)
103 |
104 |     if x > 0:  # source to the right: delay and attenuate the left channel
105 |         tl = np.hstack((np.zeros(lambda_itd), iid_a * sonic_vector))
106 |         tr = np.hstack((sonic_vector, np.zeros(lambda_itd)))
107 |     else:  # source to the left (or centered): delay and attenuate the right channel
108 |         tl = np.hstack((sonic_vector, np.zeros(-lambda_itd)))
109 |         tr = np.hstack((np.zeros(-lambda_itd), sonic_vector * (1 / iid_a)))
110 |     s = np.vstack((tl, tr))
111 |     return s
112 |
113 |
114 | def localize_linear(sonic_vector=note(), theta1=90, theta2=0, dist=.1,
115 |                     zeta=0.215, air_temp=20, sample_rate=44100):
116 |     """
117 |     A linear variation of the localize function: the source moves from theta1 to theta2 at distance dist.
118 |
119 |     See localize.
120 |
121 |     """
122 |     theta1 = 2 * np.pi * theta1 / 360
123 |     x1 = np.cos(theta1) * dist
124 |     y1 = np.sin(theta1) * dist
125 |     theta2 = 2 * np.pi * theta2 / 360
126 |     x2 = np.cos(theta2) * dist
127 |     y2 = np.sin(theta2) * dist
128 |     speed = 331.3 + .606 * air_temp
129 |
130 |     lambda_l = len(sonic_vector)
131 |     L = lambda_l  # FIXME: assure L is lambda_l
132 |     l_ = L - 1
133 |     xpos = x1 + (x2 - x1) * np.arange(lambda_l) / l_
134 |     ypos = y1 + (y2 - y1) * np.arange(lambda_l) / l_
135 |     d = np.sqrt((xpos - zeta / 2) ** 2 + ypos ** 2)
136 |     d2 = np.sqrt((xpos + zeta / 2) ** 2 + ypos ** 2)
137 |     iid_a = d / d2
138 |     itd = (d2 - d) / speed
139 |     lambda_itd = int(itd[0] * sample_rate)  # initial ITD; the time-varying delay is not applied yet
140 |
141 |     if x1 > 0:
142 |         tl = np.zeros(lambda_itd)
143 |         tr = np.array([])
144 |     else:
145 |         tl = np.array([])
146 |         tr = np.zeros(-lambda_itd)
147 |     d_ = d[1:] - d[:-1]
148 |     d2_ = d2[1:] - d2[:-1]
149 |     d__ = np.cumsum(d_).astype(np.int64)
150 |     d2__ = np.cumsum(d2_).astype(np.int64)
151 |     # FIXME: the correct use of the calculated variables is still missing here,
152 |     # and so is the proper return statement
153 |     return iid_a, tl, tr, d__, d2__
154 |
155 |
156 | def localize2(sonic_vector=note(), theta=-70, x=.1, y=.01, zeta=0.215,
157 |               air_temp=20, method="ifft", sample_rate=44100):
158 |     """
159 |     Make a mono sound stereo and localize it by experimental methods.
160 |
161 |     See below for implementation notes. These implementations are not
162 |     standard and are only meant to illustrate the use of ITD and IID values
163 |     that are frequency dependent.
164 |
165 |     Parameters
166 |     ----------
167 |     sonic_vector : array_like
168 |         A one-dimensional array with the PCM samples of the sound.
169 |     x : scalar
170 |         The lateral component of the position in meters.
171 |     y : scalar
172 |         The frontal component of the position in meters.
173 |     theta : scalar
174 |         The azimuthal angle of the position in degrees. If theta is supplied
175 |         (i.e. nonzero), x and y are ignored and the position is taken from
176 |         theta alone.
177 |     zeta : scalar
178 |         The distance between the ears in meters.
179 |     air_temp : scalar
180 |         The temperature in Celsius used for calculating
181 |         the speed of sound.
182 |     method : string
183 |         Set to "ifft" for a working method that changes the Fourier spectral
184 |         coefficients. Set to "brute" for using an implementation that
185 |         synthesizes each sinusoid in the Fourier spectrum separately
186 |         (currently not giving good results for all sounds).
187 |     sample_rate : integer
188 |         The sample rate.
189 |
190 |     Returns
191 |     -------
192 |     s : ndarray
193 |         A (2, nsamples) shaped array with the PCM samples of the stereo sound.
194 |
195 |     See Also
196 |     --------
197 |     reverb : A reverberator.
198 |     localize : a more naive and fast implementation of localization by ITD and
199 |                IID.
200 |     # FIXME: hrtf?
201 |     hrtf : performs localization by means of a Head Related Transfer Function.
202 |
203 |     Examples
204 |     --------
205 |     >>> write_wav_stereo(localize2())
206 |     >>> write_wav_stereo(horizontal_stack([
207 |     ...     localize2(note_with_vibrato(duration=1), x=i, y=j)
208 |     ...     for i, j in zip([.1, .7, np.pi - .1, np.pi - .7],
209 |     ...                     [.1, .1, .1, .1])]))
210 |
211 |     Notes
212 |     -----
213 |     Uses less naive ITD and IID calculations, as described in [1].
214 |
215 |     See localize() for further notes.
216 |
217 |     Cite the following article whenever you use this function.
218 |
219 |     References
220 |     ----------
221 |     .. [1] Fabbri, Renato, et al. "Musical elements in the discrete-time
222 |            representation of sound." arXiv preprint arXiv:1412.6853 (2017)
223 |
224 |     """
225 |     if method not in ("ifft", "brute"):
226 |         raise ValueError("The only methods implemented are ifft and brute")
227 |     if not theta:
228 |         theta_ = np.arctan2(-x, y)
229 |     else:
230 |         theta_ = 2 * np.pi * theta / 360
231 |         theta_ = np.arcsin(np.sin(theta_))  # sign of theta is used
232 |     speed = 331.3 + .606 * air_temp
233 |
234 |     c = np.fft.fft(sonic_vector)
235 |     norms = np.abs(c)
236 |     angles = np.angle(c)
237 |
238 |     lambda_l = len(sonic_vector)
239 |     max_coef = int(lambda_l / 2)
240 |     df = sample_rate / lambda_l  # frequency resolution (bin width) of the DFT
241 |
242 |     # theta = 0 is straight ahead and counter-clockwise is positive
243 |     # theta_ = 2*np.pi*theta/360
244 |     freqs = np.arange(max_coef) * df
245 |     # max_size = len(sonic_vector) + 300*zeta*np.sin(theta_)*fs
246 |     # s = np.zeros( (2, max_size) )
247 |     if method == "ifft":
248 |         normsl = np.copy(norms)
249 |         anglesl = np.copy(angles)
250 |         normsr = np.copy(norms)
251 |         anglesr = np.copy(angles)
252 |     else:
253 |         # limit the number of coeffs considered
254 |         s = []
255 |         energy = np.cumsum(norms[:max_coef] ** 2)
256 |         p = 0.01
257 |         cutoff = energy.max() * (1 - p)
258 |         ncoeffs = (energy < cutoff).sum()
259 |         maxfreq = ncoeffs * df
260 |         if maxfreq <= 4000:
261 |             foo = .3
262 |         else:
263 |             foo = .2
264 |         maxsize = int(np.ceil(len(sonic_vector) + sample_rate * foo * np.sin(
265 |             abs(theta_)) / speed))  # buffer length must be an integer
266 |         s = np.zeros((2, maxsize))
267 |
268 |     if method == "ifft":
269 |         # ITD implies a phase change
270 |         # IID implies a change in the norm
271 |         for i in range(max_coef):
272 |             if i == 0:
273 |                 continue
274 |             f = freqs[i]
275 |             if f <= 4000:
276 |                 itd = .3 * zeta * np.sin(theta_) / speed
277 |             else:
278 |                 itd = .2 * zeta * np.sin(theta_) / speed
279 |             iid = 1 + ((f / 1000) ** .8) * np.sin(abs(theta_))
280 |             # not needed, coefs are duplicated afterwards:
281 |             # if i != Lambda/2:
282 |             #     IID *= 2
283 |             # IID > 0 : left ear has amplification
284 |             # ITD > 0 : right ear has a delay
285 |             # relate ITD to phase
change (anglesl) 286 | lamb = 1 / f 287 | if theta_ > 0: 288 | change = itd - (itd // lamb) * lamb 289 | change_ = (change / lamb) * 2 * np.pi 290 | anglesr[i] += change_ 291 | normsl[i] *= iid 292 | else: 293 | itd = -itd 294 | change = itd - (itd // lamb) * lamb 295 | change_ = (change / lamb) * 2 * np.pi 296 | anglesl[i] += change_ 297 | normsr[i] *= iid 298 | 299 | elif method == "brute": 300 | warnings.warn("This can take a long time...") 301 | for i in range(ncoeffs): 302 | if i == 0: 303 | continue 304 | f = freqs[i] 305 | if f <= 4000: 306 | itd = .3 * zeta * np.sin(theta_) / speed 307 | else: 308 | itd = .2 * zeta * np.sin(theta_) / speed 309 | iid = 1 + ((f / 1000) ** .8) * np.sin(abs(theta_)) 310 | # IID > 0 : left ear has amplification 311 | # ITD > 0 : right ear has a delay 312 | itd_l = abs(int(sample_rate * itd)) 313 | if i == lambda_l / 2: 314 | amplitude = norms[i] / lambda_l 315 | else: 316 | amplitude = 2 * norms[i] / lambda_l 317 | sine = note_with_phase(freq=f, number_of_samples=lambda_l, 318 | waveform_table=WAVEFORM_SINE, 319 | sample_rate=sample_rate, 320 | phase=angles[i]) * amplitude 321 | 322 | # Account for phase and energy 323 | if theta_ > 0: 324 | tl = sine * iid 325 | tr = np.copy(sine) 326 | else: 327 | tl = np.copy(sine) 328 | tr = sine * iid 329 | 330 | if theta > 0: 331 | tl = np.hstack((tl, np.zeros(itd_l))) 332 | tr = np.hstack((np.zeros(itd_l), tr)) 333 | else: 334 | tl = np.hstack((np.zeros(itd_l), tl)) 335 | tr = np.hstack((tr, np.zeros(itd_l))) 336 | 337 | tl = np.hstack((tl, np.zeros(maxsize - len(tl)))) 338 | tr = np.hstack((tr, np.zeros(maxsize - len(tr)))) 339 | s_ = np.vstack((tl, tr)) 340 | s += s_ 341 | if method == "ifft": 342 | coefsl = normsl * np.e ** (anglesl * 1j) 343 | coefsl[max_coef + 1:] = np.real( 344 | coefsl[1:max_coef])[::-1] - 1j * np.imag( 345 | coefsl[1:max_coef])[::-1] 346 | sl = np.fft.ifft(coefsl).real 347 | 348 | coefsr = normsr * np.e ** (anglesr * 1j) 349 | coefsr[max_coef + 1:] = np.real( 350 | coefsr[1:max_coef])[::-1] - 1j * np.imag( 351 | coefsr[1:max_coef])[::-1] 352 | sr = np.fft.ifft(coefsr).real 353 | s = np.vstack((sl, sr)) 354 | # If in need to force energy to be preserved, try: 355 | # energy1 = np.sum(sonic_vector**2) 356 | # energy2 = np.sum(s**2) 357 | # s = s*(energy1/energy2)**.5 358 | return s 359 | --------------------------------------------------------------------------------
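A minimal usage sketch for the localization filters above (not a file in this listing). It assumes that write_wav_stereo is exposed by music.core.io, mirroring the write_wav_mono call seen earlier and the names used in the docstring examples; the mono input is built with plain numpy so no synth signature is assumed, and all positions and file names are illustrative.

import numpy as np
from music.core import io
from music.core.filters.localization import localize, localize2

sample_rate = 44100
t = np.arange(sample_rate) / sample_rate   # one second of sample times
mono = np.sin(2 * np.pi * 440 * t)         # plain 440 Hz sine as the mono source

stereo_naive = localize(mono, x=.3, y=.1)  # naive ITD/IID, source slightly to the right
stereo_fft = localize2(mono, theta=-30)    # frequency-dependent ITD/IID via the "ifft" method

io.write_wav_stereo(stereo_naive, "localized_naive.wav")  # assumed helper, see lead-in
io.write_wav_stereo(stereo_fft, "localized_fft.wav")

localize only delays and scales the two channels, so it is the cheaper of the two and suits tweaks such as moving sources; localize2 reshapes the whole spectrum and takes noticeably longer on long vectors.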