├── README.md ├── audio spectrum_pt1_waveform_viewer.ipynb ├── audio spectrum_pt2_spectrum_analyzer.ipynb ├── audio_spectrum.py ├── audio_spectrumQT.py ├── pygraphGL_multsine.py ├── spec.py ├── spec_anim.py ├── terrain.py └── terrain_audio.py /README.md: -------------------------------------------------------------------------------- 1 | # Audio-Spectrum-Analyzer-in-Python 2 | A live audio spectrum analyzer using pyAudio, matplotlib and scipy. 3 | 4 | ``` 5 | pip install numpy 6 | pip install matplotlib 7 | pip install scipy 8 | 9 | # Installing pyAudio 10 | pip install pipwin 11 | pipwin install pyaudio 12 | 13 | # Running the spectrum analyzer 14 | python spec_anim.py 15 | ``` 16 | 17 | 18 | 19 | Made from a YouTube tutorial series found [here](https://www.youtube.com/watch?v=AShHJdSIxkY). 20 | 21 | A series of Jupyter notebooks and python files which stream audio from a microphone using pyaudio. 22 | 23 | [Part 1](https://www.youtube.com/watch?v=AShHJdSIxkY) is a notebook which streams audio and displays the waveform with matplotlib. 24 | 25 | [Part 2](https://www.youtube.com/watch?v=aQKX3mrDFoY) adds a spectrum viewer using scipy.fftpack to compute the FFT. 
26 | 27 | -------------------------------------------------------------------------------- /audio spectrum_pt1_waveform_viewer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 20, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "\"\"\"\n", 10 | "Notebook for streaming data from a microphone in realtime\n", 11 | "\n", 12 | "audio is captured using pyaudio\n", 13 | "then converted from binary data to ints using struct\n", 14 | "then displayed using matplotlib\n", 15 | "\n", 16 | "if you don't have pyaudio, then run\n", 17 | "\n", 18 | ">>> pip install pyaudio\n", 19 | "\n", 20 | "note: with 2048 samples per chunk, I'm getting 20FPS\n", 21 | "\"\"\"\n", 22 | "\n", 23 | "import pyaudio\n", 24 | "import os\n", 25 | "import struct\n", 26 | "import numpy as np\n", 27 | "import matplotlib.pyplot as plt\n", 28 | "import time\n", 29 | "from tkinter import TclError\n", 30 | "\n", 31 | "# use this backend to display in separate Tk window\n", 32 | "%matplotlib tk\n", 33 | "\n", 34 | "# constants\n", 35 | "CHUNK = 1024 * 2 # samples per frame\n", 36 | "FORMAT = pyaudio.paInt16 # audio format (bytes per sample?)\n", 37 | "CHANNELS = 1 # single channel for microphone\n", 38 | "RATE = 44100 # samples per second" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 21, 44 | "metadata": { 45 | "scrolled": false 46 | }, 47 | "outputs": [ 48 | { 49 | "name": "stdout", 50 | "output_type": "stream", 51 | "text": [ 52 | "stream started\n", 53 | "stream stopped\n", 54 | "average frame rate = 21 FPS\n" 55 | ] 56 | } 57 | ], 58 | "source": [ 59 | "# create matplotlib figure and axes\n", 60 | "fig, ax = plt.subplots(1, figsize=(15, 7))\n", 61 | "\n", 62 | "# pyaudio class instance\n", 63 | "p = pyaudio.PyAudio()\n", 64 | "\n", 65 | "# stream object to get data from microphone\n", 66 | "stream = p.open(\n", 67 | " format=FORMAT,\n", 68 | " 
channels=CHANNELS,\n", 69 | " rate=RATE,\n", 70 | " input=True,\n", 71 | " output=True,\n", 72 | " frames_per_buffer=CHUNK\n", 73 | ")\n", 74 | "\n", 75 | "# variable for plotting\n", 76 | "x = np.arange(0, 2 * CHUNK, 2)\n", 77 | "\n", 78 | "# create a line object with random data\n", 79 | "line, = ax.plot(x, np.random.rand(CHUNK), '-', lw=2)\n", 80 | "\n", 81 | "# basic formatting for the axes\n", 82 | "ax.set_title('AUDIO WAVEFORM')\n", 83 | "ax.set_xlabel('samples')\n", 84 | "ax.set_ylabel('volume')\n", 85 | "ax.set_ylim(0, 255)\n", 86 | "ax.set_xlim(0, 2 * CHUNK)\n", 87 | "plt.setp(ax, xticks=[0, CHUNK, 2 * CHUNK], yticks=[0, 128, 255])\n", 88 | "\n", 89 | "# show the plot\n", 90 | "plt.show(block=False)\n", 91 | "\n", 92 | "print('stream started')\n", 93 | "\n", 94 | "# for measuring frame rate\n", 95 | "frame_count = 0\n", 96 | "start_time = time.time()\n", 97 | "\n", 98 | "while True:\n", 99 | " \n", 100 | " # binary data\n", 101 | " data = stream.read(CHUNK) \n", 102 | " \n", 103 | " # convert data to integers, make np array, then offset it by 127\n", 104 | " data_int = struct.unpack(str(2 * CHUNK) + 'B', data)\n", 105 | " \n", 106 | " # create np array and offset by 128\n", 107 | " data_np = np.array(data_int, dtype='b')[::2] + 128\n", 108 | " \n", 109 | " line.set_ydata(data_np)\n", 110 | " \n", 111 | " # update figure canvas\n", 112 | " try:\n", 113 | " fig.canvas.draw()\n", 114 | " fig.canvas.flush_events()\n", 115 | " frame_count += 1\n", 116 | " \n", 117 | " except TclError:\n", 118 | " \n", 119 | " # calculate average frame rate\n", 120 | " frame_rate = frame_count / (time.time() - start_time)\n", 121 | " \n", 122 | " print('stream stopped')\n", 123 | " print('average frame rate = {:.0f} FPS'.format(frame_rate))\n", 124 | " break" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": { 131 | "collapsed": true 132 | }, 133 | "outputs": [], 134 | "source": [] 135 | } 136 | ], 137 | "metadata": { 138 | 
"anaconda-cloud": {}, 139 | "kernelspec": { 140 | "display_name": "Python [conda root]", 141 | "language": "python", 142 | "name": "conda-root-py" 143 | }, 144 | "language_info": { 145 | "codemirror_mode": { 146 | "name": "ipython", 147 | "version": 3 148 | }, 149 | "file_extension": ".py", 150 | "mimetype": "text/x-python", 151 | "name": "python", 152 | "nbconvert_exporter": "python", 153 | "pygments_lexer": "ipython3", 154 | "version": "3.5.3" 155 | } 156 | }, 157 | "nbformat": 4, 158 | "nbformat_minor": 1 159 | } 160 | -------------------------------------------------------------------------------- /audio spectrum_pt2_spectrum_analyzer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 40, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "\"\"\"\n", 12 | "Notebook for streaming data from a microphone in realtime\n", 13 | "\n", 14 | "audio is captured using pyaudio\n", 15 | "then converted from binary data to ints using struct\n", 16 | "then displayed using matplotlib\n", 17 | "\n", 18 | "scipy.fftpack computes the FFT\n", 19 | "\n", 20 | "if you don't have pyaudio, then run\n", 21 | "\n", 22 | ">>> pip install pyaudio\n", 23 | "\n", 24 | "note: with 2048 samples per chunk, I'm getting 20FPS\n", 25 | "when also running the spectrum, its about 15FPS\n", 26 | "\"\"\"\n", 27 | "\n", 28 | "import pyaudio\n", 29 | "import os\n", 30 | "import struct\n", 31 | "import numpy as np\n", 32 | "import matplotlib.pyplot as plt\n", 33 | "from scipy.fftpack import fft\n", 34 | "import time\n", 35 | "from tkinter import TclError\n", 36 | "\n", 37 | "# to display in separate Tk window\n", 38 | "%matplotlib tk\n", 39 | "\n", 40 | "# constants\n", 41 | "CHUNK = 1024 * 2 # samples per frame\n", 42 | "FORMAT = pyaudio.paInt16 # audio format (bytes per sample?)\n", 43 | "CHANNELS = 1 # single channel for microphone\n", 44 | "RATE = 44100 
# samples per second" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 54, 50 | "metadata": { 51 | "scrolled": false 52 | }, 53 | "outputs": [ 54 | { 55 | "name": "stdout", 56 | "output_type": "stream", 57 | "text": [ 58 | "stream started\n", 59 | "stream stopped\n", 60 | "average frame rate = 15 FPS\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "# create matplotlib figure and axes\n", 66 | "fig, (ax1, ax2) = plt.subplots(2, figsize=(15, 7))\n", 67 | "\n", 68 | "# pyaudio class instance\n", 69 | "p = pyaudio.PyAudio()\n", 70 | "\n", 71 | "# stream object to get data from microphone\n", 72 | "stream = p.open(\n", 73 | " format=FORMAT,\n", 74 | " channels=CHANNELS,\n", 75 | " rate=RATE,\n", 76 | " input=True,\n", 77 | " output=True,\n", 78 | " frames_per_buffer=CHUNK\n", 79 | ")\n", 80 | "\n", 81 | "# variable for plotting\n", 82 | "x = np.arange(0, 2 * CHUNK, 2) # samples (waveform)\n", 83 | "xf = np.linspace(0, RATE, CHUNK) # frequencies (spectrum)\n", 84 | "\n", 85 | "# create a line object with random data\n", 86 | "line, = ax1.plot(x, np.random.rand(CHUNK), '-', lw=2)\n", 87 | "\n", 88 | "# create semilogx line for spectrum\n", 89 | "line_fft, = ax2.semilogx(xf, np.random.rand(CHUNK), '-', lw=2)\n", 90 | "\n", 91 | "# Signal range is -32k to 32k\n", 92 | "# limiting amplitude to +/- 4k\n", 93 | "AMPLITUDE_LIMIT = 4096\n", 94 | "\n", 95 | "# format waveform axes\n", 96 | "ax1.set_title('AUDIO WAVEFORM')\n", 97 | "ax1.set_xlabel('samples')\n", 98 | "ax1.set_ylabel('volume')\n", 99 | "ax1.set_ylim(-AMPLITUDE_LIMIT, AMPLITUDE_LIMIT)\n", 100 | "ax1.set_xlim(0, 2 * CHUNK)\n", 101 | "plt.setp(ax1, xticks=[0, CHUNK, 2 * CHUNK], yticks=[-AMPLITUDE_LIMIT, 0, AMPLITUDE_LIMIT])\n", 102 | "\n", 103 | "# format spectrum axes\n", 104 | "ax2.set_xlim(20, RATE / 2)\n", 105 | "\n", 106 | "print('stream started')\n", 107 | "\n", 108 | "# for measuring frame rate\n", 109 | "frame_count = 0\n", 110 | "start_time = time.time()\n", 111 | "\n", 112 | "while 
True:\n", 113 | " \n", 114 | " # binary data\n", 115 | " data = stream.read(CHUNK) \n", 116 | "\n", 117 | " data_np = np.frombuffer(data, dtype='h')\n", 118 | " \n", 119 | " line.set_ydata(data_np)\n", 120 | " \n", 121 | " # compute FFT and update line\n", 122 | " yf = fft(data_np)\n", 123 | " line_fft.set_ydata(np.abs(yf[0:CHUNK]) / (512 * CHUNK))\n", 124 | " \n", 125 | " # update figure canvas\n", 126 | " try:\n", 127 | " fig.canvas.draw()\n", 128 | " fig.canvas.flush_events()\n", 129 | " frame_count += 1\n", 130 | " \n", 131 | " except TclError:\n", 132 | " \n", 133 | " # calculate average frame rate\n", 134 | " frame_rate = frame_count / (time.time() - start_time)\n", 135 | " \n", 136 | " print('stream stopped')\n", 137 | " print('average frame rate = {:.0f} FPS'.format(frame_rate))\n", 138 | " break" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": null, 144 | "metadata": { 145 | "collapsed": true 146 | }, 147 | "outputs": [], 148 | "source": [] 149 | } 150 | ], 151 | "metadata": { 152 | "anaconda-cloud": {}, 153 | "kernelspec": { 154 | "display_name": "Python [conda root]", 155 | "language": "python", 156 | "name": "conda-root-py" 157 | }, 158 | "language_info": { 159 | "codemirror_mode": { 160 | "name": "ipython", 161 | "version": 3 162 | }, 163 | "file_extension": ".py", 164 | "mimetype": "text/x-python", 165 | "name": "python", 166 | "nbconvert_exporter": "python", 167 | "pygments_lexer": "ipython3", 168 | "version": "3.5.3" 169 | } 170 | }, 171 | "nbformat": 4, 172 | "nbformat_minor": 1 173 | } 174 | -------------------------------------------------------------------------------- /audio_spectrum.py: -------------------------------------------------------------------------------- 1 | """ 2 | Notebook for streaming data from a microphone in realtime 3 | 4 | audio is captured using pyaudio 5 | then converted from binary data to ints using struct 6 | then displayed using matplotlib 7 | 8 | scipy.fftpack computes the FFT 9 | 10 | if 
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import struct
from scipy.fftpack import fft
import sys
import time


class AudioStream(object):
    """Live microphone viewer: waveform on top, FFT spectrum below.

    Opens a PyAudio stream on construction and immediately enters the
    draw loop; a mouse click anywhere in the figure stops it.
    """

    def __init__(self):
        # stream constants
        self.CHUNK = 1024 * 2          # samples per frame
        self.FORMAT = pyaudio.paInt16  # 16-bit samples
        self.CHANNELS = 1              # mono microphone
        self.RATE = 44100              # samples per second
        self.pause = False             # flipped True by onClick to end the loop

        # stream object
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            output=True,
            frames_per_buffer=self.CHUNK,
        )
        self.init_plots()
        self.start_plot()

    def init_plots(self):
        """Create the two axes and the line artists that get updated per frame."""
        # x variables for plotting
        x = np.arange(0, 2 * self.CHUNK, 2)
        xf = np.linspace(0, self.RATE, self.CHUNK)

        # create matplotlib figure and axes
        self.fig, (ax1, ax2) = plt.subplots(2, figsize=(15, 7))
        # a click anywhere in the window requests shutdown
        self.fig.canvas.mpl_connect('button_press_event', self.onClick)

        # line objects seeded with random data; real data replaces it each frame
        self.line, = ax1.plot(x, np.random.rand(self.CHUNK), '-', lw=2)
        self.line_fft, = ax2.semilogx(
            xf, np.random.rand(self.CHUNK), '-', lw=2)

        # format waveform axes (8-bit display range, 0..255)
        ax1.set_title('AUDIO WAVEFORM')
        ax1.set_xlabel('samples')
        ax1.set_ylabel('volume')
        ax1.set_ylim(0, 255)
        ax1.set_xlim(0, 2 * self.CHUNK)
        plt.setp(
            ax1, yticks=[0, 128, 255],
            xticks=[0, self.CHUNK, 2 * self.CHUNK],
        )
        plt.setp(ax2, yticks=[0, 1],)

        # format spectrum axes
        ax2.set_xlim(20, self.RATE / 2)

        # show axes
        thismanager = plt.get_current_fig_manager()
        thismanager.window.setGeometry(5, 120, 1910, 1070)
        plt.show(block=False)

    def start_plot(self):
        """Read audio chunks and redraw until the user clicks the figure."""
        print('stream started')
        frame_count = 0
        start_time = time.time()

        while not self.pause:
            data = self.stream.read(self.CHUNK)
            # Reinterpret the raw bytes as int8 and keep every other one,
            # then shift to 0..255 for display.  (Equivalent to the old
            # struct.unpack + np.array(dtype='b') path, but frombuffer does
            # not overflow on NumPy >= 2 for byte values above 127.)
            data_int8 = np.frombuffer(data, dtype=np.int8)[::2]
            data_np = data_int8.astype(np.int16) + 128

            self.line.set_ydata(data_np)

            # BUG FIX: the FFT was previously computed on the raw
            # interleaved byte tuple (2*CHUNK unsigned bytes), not on the
            # decoded waveform.  Transform the zero-centred signal instead.
            yf = fft(data_int8.astype(np.int16))
            self.line_fft.set_ydata(
                np.abs(yf[0:self.CHUNK]) / (128 * self.CHUNK))

            # update figure canvas
            self.fig.canvas.draw()
            self.fig.canvas.flush_events()
            frame_count += 1

        else:
            # loop ended normally (pause requested): report FPS and clean up
            self.fr = frame_count / (time.time() - start_time)
            print('average frame rate = {:.0f} FPS'.format(self.fr))
            self.exit_app()

    def exit_app(self):
        """Stop and release the audio stream and the PyAudio instance."""
        print('stream closed')
        self.stream.stop_stream()   # BUG FIX: stop I/O before closing
        self.p.close(self.stream)
        self.p.terminate()          # BUG FIX: release PortAudio resources

    def onClick(self, event):
        # any mouse click ends the acquisition loop
        self.pause = True


if __name__ == '__main__':
    AudioStream()
| 28 | wf_ylabels = [(0, '0'), (127, '128'), (255, '255')] 29 | wf_yaxis = pg.AxisItem(orientation='left') 30 | wf_yaxis.setTicks([wf_ylabels]) 31 | 32 | sp_xlabels = [ 33 | (np.log10(10), '10'), (np.log10(100), '100'), 34 | (np.log10(1000), '1000'), (np.log10(22050), '22050') 35 | ] 36 | sp_xaxis = pg.AxisItem(orientation='bottom') 37 | sp_xaxis.setTicks([sp_xlabels]) 38 | 39 | self.waveform = self.win.addPlot( 40 | title='WAVEFORM', row=1, col=1, axisItems={'bottom': wf_xaxis, 'left': wf_yaxis}, 41 | ) 42 | self.spectrum = self.win.addPlot( 43 | title='SPECTRUM', row=2, col=1, axisItems={'bottom': sp_xaxis}, 44 | ) 45 | 46 | # pyaudio stuff 47 | self.FORMAT = pyaudio.paInt16 48 | self.CHANNELS = 1 49 | self.RATE = 44100 50 | self.CHUNK = 1024 * 2 51 | 52 | self.p = pyaudio.PyAudio() 53 | self.stream = self.p.open( 54 | format=self.FORMAT, 55 | channels=self.CHANNELS, 56 | rate=self.RATE, 57 | input=True, 58 | output=True, 59 | frames_per_buffer=self.CHUNK, 60 | ) 61 | # waveform and spectrum x points 62 | self.x = np.arange(0, 2 * self.CHUNK, 2) 63 | self.f = np.linspace(0, self.RATE / 2, self.CHUNK / 2) 64 | 65 | def start(self): 66 | if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): 67 | QtGui.QApplication.instance().exec_() 68 | 69 | def set_plotdata(self, name, data_x, data_y): 70 | if name in self.traces: 71 | self.traces[name].setData(data_x, data_y) 72 | else: 73 | if name == 'waveform': 74 | self.traces[name] = self.waveform.plot(pen='c', width=3) 75 | self.waveform.setYRange(0, 255, padding=0) 76 | self.waveform.setXRange(0, 2 * self.CHUNK, padding=0.005) 77 | if name == 'spectrum': 78 | self.traces[name] = self.spectrum.plot(pen='m', width=3) 79 | self.spectrum.setLogMode(x=True, y=True) 80 | self.spectrum.setYRange(-4, 0, padding=0) 81 | self.spectrum.setXRange( 82 | np.log10(20), np.log10(self.RATE / 2), padding=0.005) 83 | 84 | def update(self): 85 | wf_data = self.stream.read(self.CHUNK) 86 | wf_data = struct.unpack(str(2 * 
# -*- coding: utf-8 -*-
"""
Animated 3D sinc function

requires:
1. pyqtgraph
   - download from here http://www.pyqtgraph.org/
2. pyopenGL
   - if you have Anaconda, run the following command
     >>> conda install -c anaconda pyopengl
"""

from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
import sys
import time


class Visualizer(object):
    """Draws a family of parallel 3D sine traces and animates their phase."""

    def __init__(self):
        self.traces = {}
        self.app = QtGui.QApplication(sys.argv)
        self.w = gl.GLViewWidget()
        self.w.opts['distance'] = 40
        self.w.setWindowTitle('pyqtgraph example: GLLinePlotItem')
        self.w.setGeometry(0, 110, 1920, 1080)
        self.w.show()

        self.phase = 0
        self.lines = 50
        self.points = 1000
        self.y = np.linspace(-10, 10, self.lines)
        self.x = np.linspace(-10, 10, self.points)

        # one GLLinePlotItem per y level, seeded with a radial sine surface
        for idx, y_level in enumerate(self.y):
            row_y = np.array([y_level] * self.points)
            radius = np.sqrt(self.x ** 2 + row_y ** 2)
            zvals = 10 * np.sin(radius + self.phase)
            trace = gl.GLLinePlotItem(
                pos=np.vstack([self.x, row_y, zvals]).transpose(),
                color=pg.glColor((idx, self.lines * 1.3)),
                width=(idx + 1) / 10,
                antialias=True,
            )
            self.traces[idx] = trace
            self.w.addItem(trace)

    def start(self):
        """Enter the Qt event loop unless running interactively."""
        if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
            QtGui.QApplication.instance().exec_()

    def set_plotdata(self, name, points, color, width):
        """Push new geometry/appearance to the trace called *name*."""
        self.traces[name].setData(pos=points, color=color, width=width)

    def update(self):
        """Recompute every trace with per-line amplitude, phase and frequency."""
        stime = time.time()
        for idx, y_level in enumerate(self.y):
            row_y = np.array([y_level] * self.points)

            amp = 10 / (idx + 1)
            phase = self.phase * (idx + 1) - 10
            freq = self.x * (idx + 1) / 10

            zvals = amp * np.sin(freq - phase)

            self.set_plotdata(
                name=idx,
                points=np.vstack([self.x, row_y, zvals]).transpose(),
                color=pg.glColor((idx, self.lines * 1.3)),
                width=3,
            )
        self.phase -= .0002

        print('{:.0f} FPS'.format(1 / (time.time() - stime)))

    def animation(self):
        """Drive update() from a 10 ms Qt timer, then start the event loop."""
        timer = QtCore.QTimer()
        timer.timeout.connect(self.update)
        timer.start(10)
        self.start()


# Start event loop.
if __name__ == '__main__':
    v = Visualizer()
    v.animation()
# to display in separate Tk window
import matplotlib
matplotlib.use('TkAgg')

import pyaudio
import os
import struct
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
import time
from tkinter import TclError

# ------------ Audio Setup ---------------
# constants
CHUNK = 1024 * 2         # samples per frame
FORMAT = pyaudio.paInt16 # 16-bit samples
CHANNELS = 1             # single channel for microphone
RATE = 44100             # samples per second
# Signal range is -32k to 32k; limit the displayed amplitude to +/- 4k
AMPLITUDE_LIMIT = 4096

# pyaudio class instance
p = pyaudio.PyAudio()

# stream object to get data from microphone
stream = p.open(
    format=FORMAT,
    channels=CHANNELS,
    rate=RATE,
    input=True,
    output=True,
    frames_per_buffer=CHUNK
)

# ------------ Plot Setup ---------------
# interactive mode so canvas.draw() updates without blocking
plt.ion()
fig, (ax1, ax2) = plt.subplots(2, figsize=(15, 7))
# variables for plotting
x = np.arange(0, 2 * CHUNK, 2)    # sample indices (waveform)
xf = np.linspace(0, RATE, CHUNK)  # frequency bins (spectrum)

# create a line object with random data; real data replaces it each frame
line, = ax1.plot(x, np.random.rand(CHUNK), '-', lw=2)

# semilogx line for the spectrum (log frequency axis)
line_fft, = ax2.semilogx(xf, np.random.rand(CHUNK), '-', lw=2)

# format waveform axes
ax1.set_title('AUDIO WAVEFORM')
ax1.set_xlabel('samples')
ax1.set_ylabel('volume')
ax1.set_ylim(-AMPLITUDE_LIMIT, AMPLITUDE_LIMIT)
ax1.set_xlim(0, 2 * CHUNK)
plt.setp(ax1, xticks=[0, CHUNK, 2 * CHUNK],
         yticks=[-AMPLITUDE_LIMIT, 0, AMPLITUDE_LIMIT])

# format spectrum axes
ax2.set_xlim(20, RATE / 2)
print('stream started')

if __name__ == '__main__':
    # for measuring frame rate
    frame_count = 0
    start_time = time.time()

    while True:
        # binary data -> int16 samples
        data = stream.read(CHUNK)
        data_np = np.frombuffer(data, dtype='h')

        # update the waveform line
        line.set_ydata(data_np)

        # FFT magnitude (np.abs of the complex output), scaled for the plot
        yf = fft(data_np)
        line_fft.set_ydata(np.abs(yf[0:CHUNK]) / (512 * CHUNK))

        # update figure canvas; TclError means the window was closed
        try:
            fig.canvas.draw()
            fig.canvas.flush_events()
            frame_count += 1

        except TclError:
            # calculate average frame rate
            frame_rate = frame_count / (time.time() - start_time)

            print('stream stopped')
            print('average frame rate = {:.0f} FPS'.format(frame_rate))
            break

    # BUG FIX: the stream and the PyAudio instance were never released,
    # leaving the audio device open after the window was closed.
    stream.stop_stream()
    stream.close()
    p.terminate()
# Frame counter.  A one-element list (rather than a bare int) lets the
# callbacks below mutate it without a `global` declaration; see
# https://stackoverflow.com/questions/25040323/unable-to-reference-one-particular-variable-declared-outside-a-function
count = [0]

# ------------ Audio Setup ---------------
# constants
CHUNK = 1024 * 2          # samples per frame
FORMAT = pyaudio.paInt16  # audio format (16-bit samples)
CHANNELS = 1              # single channel for microphone
RATE = 44100              # samples per second
# Signal range is -32k to 32k; limit the displayed amplitude to +/- 4k
AMPLITUDE_LIMIT = 4096

# pyaudio class instance
p = pyaudio.PyAudio()

# stream object to get data from microphone
stream = p.open(
    format=FORMAT,
    channels=CHANNELS,
    rate=RATE,
    input=True,
    output=True,
    frames_per_buffer=CHUNK
)

# ------------ Plot Setup ---------------
fig, (ax1, ax2) = plt.subplots(2, figsize=(15, 7))
x = np.arange(0, 2 * CHUNK, 2)    # sample indices (waveform)
xf = np.linspace(0, RATE, CHUNK)  # frequency bins (spectrum)

# line artists seeded with random data; real data replaces it each frame
line, = ax1.plot(x, np.random.rand(CHUNK), '-', lw=2)
line_fft, = ax2.semilogx(xf, np.random.rand(CHUNK), '-', lw=2)

# format waveform axes
ax1.set_title('AUDIO WAVEFORM')
ax1.set_xlabel('samples')
ax1.set_ylabel('volume')
ax1.set_ylim(-AMPLITUDE_LIMIT, AMPLITUDE_LIMIT)
ax1.set_xlim(0, 2 * CHUNK)
plt.setp(ax1, xticks=[0, CHUNK, 2 * CHUNK],
         yticks=[-AMPLITUDE_LIMIT, 0, AMPLITUDE_LIMIT])

# format spectrum axes
ax2.set_xlim(20, RATE / 2)
print('stream started')


def on_close(evt):
    """Figure-close callback: report FPS and shut the audio stream down."""
    print("Closing")
    # average frame rate over the whole run
    frame_rate = count[0] / (time.time() - start_time)

    # Close the stream and terminate pyAudio
    stream.stop_stream()
    stream.close()
    p.terminate()
    print('stream stopped')
    print('average frame rate = {:.0f} FPS'.format(frame_rate))
    quit()


def animate(i):
    """FuncAnimation callback: read one chunk and refresh both lines."""
    # raw bytes -> int16 samples
    data_np = np.frombuffer(stream.read(CHUNK), dtype='h')

    # update the waveform line
    line.set_ydata(data_np)

    # FFT magnitude (np.abs of the complex output), scaled for the plot
    line_fft.set_ydata(np.abs(fft(data_np)[0:CHUNK]) / (512 * CHUNK))

    # bump the frame counter
    count[0] += 1


if __name__ == '__main__':
    start_time = time.time()

    anim = animation.FuncAnimation(fig, animate, blit=False, interval=1)
    fig.canvas.mpl_connect('close_event', on_close)
    plt.show()
class Terrain(object):
    """Animated 3D terrain: a triangulated grid displaced by simplex noise.

    The noise field is shifted each frame to produce a "fly-over" effect.
    """

    def __init__(self):
        """Initialize the graphics window and mesh."""
        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.w = gl.GLViewWidget()
        self.w.setGeometry(0, 110, 1920, 1080)
        self.w.show()
        self.w.setWindowTitle('Terrain')
        self.w.setCameraPosition(distance=30, elevation=8)

        # constants and arrays
        self.nsteps = 1
        self.ypoints = range(-20, 22, self.nsteps)
        self.xpoints = range(-20, 22, self.nsteps)
        self.nfaces = len(self.ypoints)   # vertices per grid row/column
        self.offset = 0

        # perlin noise object
        self.tmp = OpenSimplex()

        # initial mesh: low relief, all faces black/transparent
        # (refactor: the vertex/face/color construction was duplicated
        # verbatim between __init__ and update; it now lives in helpers)
        verts = self._vertices(height=1.5, offset=0)
        faces = self._grid_faces()
        colors = np.zeros((len(faces), 4))

        # create the mesh item
        self.m1 = gl.GLMeshItem(
            vertexes=verts,
            faces=faces, faceColors=colors,
            smooth=False, drawEdges=True,
        )
        self.m1.setGLOptions('additive')
        self.w.addItem(self.m1)

    def _vertices(self, height, offset):
        """Vertex array for the grid; z is simplex noise scaled by *height*."""
        return np.array([
            [
                x, y, height * self.tmp.noise2d(x=n / 5 + offset, y=m / 5 + offset)
            ] for n, x in enumerate(self.xpoints) for m, y in enumerate(self.ypoints)
        ], dtype=np.float32)

    def _grid_faces(self):
        """Triangle index array: two triangles per grid cell."""
        faces = []
        for m in range(self.nfaces - 1):
            yoff = m * self.nfaces
            for n in range(self.nfaces - 1):
                faces.append([n + yoff, yoff + n + self.nfaces, yoff + n + self.nfaces + 1])
                faces.append([n + yoff, yoff + n + 1, yoff + n + self.nfaces + 1])
        return np.array(faces, dtype=np.uint32)

    def _face_colors(self):
        """Per-face RGBA: hue varies across the grid, alternating alpha."""
        colors = []
        for m in range(self.nfaces - 1):
            for n in range(self.nfaces - 1):
                colors.append([n / self.nfaces, 1 - n / self.nfaces, m / self.nfaces, 0.7])
                colors.append([n / self.nfaces, 1 - n / self.nfaces, m / self.nfaces, 0.8])
        return np.array(colors, dtype=np.float32)

    def update(self):
        """Update the mesh and shift the noise each frame."""
        verts = self._vertices(height=2.5, offset=self.offset)
        self.m1.setMeshData(
            vertexes=verts, faces=self._grid_faces(), faceColors=self._face_colors()
        )
        self.offset -= 0.18

    def start(self):
        """Open the graphics window and run the Qt event loop."""
        if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
            QtGui.QApplication.instance().exec_()

    def animation(self):
        """Call update() on a 10 ms timer, then enter the event loop.

        BUG FIX: the original also called self.update() *after* start()
        returned, i.e. only once the window had already been closed;
        that dead call has been removed.
        """
        timer = QtCore.QTimer()
        timer.timeout.connect(self.update)
        timer.start(10)
        self.start()


if __name__ == '__main__':
    t = Terrain()
    t.animation()
class Terrain(object):
    """Audio-reactive 3D terrain.

    A triangulated grid is displaced by simplex noise scaled by live
    microphone amplitude; the noise is shifted each frame ("fly-over").
    """

    def __init__(self):
        """Initialize the graphics window, the audio stream and the mesh."""
        # setup the view window
        self.app = QtGui.QApplication(sys.argv)
        self.window = gl.GLViewWidget()
        self.window.setWindowTitle('Terrain')
        self.window.setGeometry(0, 110, 1920, 1080)
        self.window.setCameraPosition(distance=30, elevation=12)
        self.window.show()

        # constants and arrays
        self.nsteps = 1.3
        self.offset = 0
        self.ypoints = np.arange(-20, 20 + self.nsteps, self.nsteps)
        self.xpoints = np.arange(-20, 20 + self.nsteps, self.nsteps)
        self.nfaces = len(self.ypoints)   # vertices per grid row/column

        self.RATE = 44100
        # one audio sample per grid vertex
        self.CHUNK = len(self.xpoints) * len(self.ypoints)

        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=self.RATE,
            input=True,
            output=True,
            frames_per_buffer=self.CHUNK,
        )

        # perlin noise object
        self.noise = OpenSimplex()

        # create the vertices array
        verts, faces, colors = self.mesh()

        self.mesh1 = gl.GLMeshItem(
            faces=faces,
            vertexes=verts,
            faceColors=colors,
            drawEdges=True,
            smooth=False,
        )
        self.mesh1.setGLOptions('additive')
        self.window.addItem(self.mesh1)

    def _decode_audio(self, wf_data):
        """Raw 16-bit stream bytes -> per-vertex grid of scaled amplitudes."""
        wf_data = struct.unpack(str(2 * self.CHUNK) + 'B', wf_data)
        wf_data = np.array(wf_data, dtype='b')[::2] + 128
        wf_data = np.array(wf_data, dtype='int32') - 128
        wf_data = wf_data * 0.04  # scale amplitude into a usable height factor
        return wf_data.reshape((len(self.xpoints), len(self.ypoints)))

    def _grid_faces(self):
        """Triangle index array: two triangles per grid cell."""
        faces = []
        for yid in range(self.nfaces - 1):
            yoff = yid * self.nfaces
            for xid in range(self.nfaces - 1):
                faces.append([
                    xid + yoff,
                    xid + yoff + self.nfaces,
                    xid + yoff + self.nfaces + 1,
                ])
                faces.append([
                    xid + yoff,
                    xid + yoff + 1,
                    xid + yoff + self.nfaces + 1,
                ])
        return np.array(faces, dtype=np.uint32)

    def _face_colors(self):
        """Per-face RGBA: hue varies across the grid, alternating alpha."""
        colors = []
        for yid in range(self.nfaces - 1):
            for xid in range(self.nfaces - 1):
                colors.append([
                    xid / self.nfaces, 1 - xid / self.nfaces, yid / self.nfaces, 0.7
                ])
                colors.append([
                    xid / self.nfaces, 1 - xid / self.nfaces, yid / self.nfaces, 0.8
                ])
        return np.array(colors, dtype=np.float32)

    def mesh(self, offset=0, height=2.5, wf_data=None):
        """Build (verts, faces, colors) for the current audio frame.

        *height* is kept for interface compatibility; as in the original
        it is not used by the computation.
        """
        if wf_data is not None:
            amp = self._decode_audio(wf_data)
        else:
            # BUG FIX (generalization): the silent fallback hard-coded 1024,
            # which only worked because CHUNK happened to equal 1024 for the
            # default grid; derive the shape from the grid dimensions.
            amp = np.ones((len(self.xpoints), len(self.ypoints)))

        verts = np.array([
            [
                x, y, amp[xid][yid] * self.noise.noise2d(x=xid / 5 + offset, y=yid / 5 + offset)
            ] for xid, x in enumerate(self.xpoints) for yid, y in enumerate(self.ypoints)
        ], dtype=np.float32)

        return verts, self._grid_faces(), self._face_colors()

    def update(self):
        """Read one audio chunk, rebuild the mesh and shift the noise."""
        wf_data = self.stream.read(self.CHUNK)

        verts, faces, colors = self.mesh(offset=self.offset, wf_data=wf_data)
        self.mesh1.setMeshData(vertexes=verts, faces=faces, faceColors=colors)
        self.offset -= 0.05

    def start(self):
        """Open the graphics window and run the Qt event loop."""
        if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
            QtGui.QApplication.instance().exec_()

    def animation(self, frametime=10):
        """Call update() every *frametime* ms, then enter the event loop."""
        timer = QtCore.QTimer()
        timer.timeout.connect(self.update)
        timer.start(frametime)
        self.start()


if __name__ == '__main__':
    t = Terrain()
    t.animation()