├── .dockerignore
├── .gitignore
├── .travis.yml
├── Experiments.ipynb
├── README.md
├── TODO.md
├── braindump.md
├── data
└── results
│ └── 20190511-2353-0e60
│ ├── 0.confusion.npz
│ ├── 0.device.json
│ ├── 0.layers.csv
│ ├── 1.confusion.npz
│ ├── 1.device.json
│ ├── 1.layers.csv
│ ├── 2.confusion.npz
│ ├── 2.device.json
│ ├── 2.layers.csv
│ ├── 3.confusion.npz
│ ├── 3.device.json
│ ├── 3.layers.csv
│ ├── 4.confusion.npz
│ ├── 4.device.json
│ ├── 4.layers.csv
│ ├── 5.confusion.npz
│ ├── 5.device.json
│ ├── 5.layers.csv
│ ├── 6.confusion.npz
│ ├── 6.device.json
│ ├── 6.layers.csv
│ ├── 7.confusion.npz
│ ├── 7.device.json
│ ├── 7.layers.csv
│ ├── 8.confusion.npz
│ ├── 8.device.json
│ ├── 8.layers.csv
│ ├── 9.confusion.npz
│ ├── 9.device.json
│ ├── 9.layers.csv
│ └── stm32stats.csv
├── environment.yml
├── experiments
├── 16k30_256hop.yaml
├── 16k30_512hop.yaml
├── baselines.csv
└── ldcnn20k60.yaml
├── firmware
└── perftest
│ ├── .gitignore
│ ├── .mxproject
│ ├── Drivers
│ ├── CMSIS
│ │ ├── DSP_Lib
│ │ │ └── Source
│ │ │ │ └── BasicMathFunctions
│ │ │ │ └── arm_dot_prod_f32.c
│ │ ├── Device
│ │ │ └── ST
│ │ │ │ └── STM32L4xx
│ │ │ │ └── Include
│ │ │ │ ├── stm32l476xx.h
│ │ │ │ ├── stm32l4xx.h
│ │ │ │ └── system_stm32l4xx.h
│ │ └── Include
│ │ │ ├── arm_common_tables.h
│ │ │ ├── arm_const_structs.h
│ │ │ ├── arm_math.h
│ │ │ ├── cmsis_armcc.h
│ │ │ ├── cmsis_armcc_V6.h
│ │ │ ├── cmsis_gcc.h
│ │ │ ├── core_cm0.h
│ │ │ ├── core_cm0plus.h
│ │ │ ├── core_cm3.h
│ │ │ ├── core_cm4.h
│ │ │ ├── core_cm7.h
│ │ │ ├── core_cmFunc.h
│ │ │ ├── core_cmInstr.h
│ │ │ ├── core_cmSimd.h
│ │ │ ├── core_sc000.h
│ │ │ └── core_sc300.h
│ └── STM32L4xx_HAL_Driver
│ │ ├── Inc
│ │ ├── Legacy
│ │ │ └── stm32_hal_legacy.h
│ │ ├── stm32l4xx_hal.h
│ │ ├── stm32l4xx_hal_cortex.h
│ │ ├── stm32l4xx_hal_crc.h
│ │ ├── stm32l4xx_hal_crc_ex.h
│ │ ├── stm32l4xx_hal_def.h
│ │ ├── stm32l4xx_hal_dma.h
│ │ ├── stm32l4xx_hal_dma_ex.h
│ │ ├── stm32l4xx_hal_flash.h
│ │ ├── stm32l4xx_hal_flash_ex.h
│ │ ├── stm32l4xx_hal_flash_ramfunc.h
│ │ ├── stm32l4xx_hal_gpio.h
│ │ ├── stm32l4xx_hal_gpio_ex.h
│ │ ├── stm32l4xx_hal_i2c.h
│ │ ├── stm32l4xx_hal_i2c_ex.h
│ │ ├── stm32l4xx_hal_pwr.h
│ │ ├── stm32l4xx_hal_pwr_ex.h
│ │ ├── stm32l4xx_hal_rcc.h
│ │ ├── stm32l4xx_hal_rcc_ex.h
│ │ ├── stm32l4xx_hal_tim.h
│ │ ├── stm32l4xx_hal_tim_ex.h
│ │ ├── stm32l4xx_hal_uart.h
│ │ └── stm32l4xx_hal_uart_ex.h
│ │ └── Src
│ │ ├── stm32l4xx_hal.c
│ │ ├── stm32l4xx_hal_cortex.c
│ │ ├── stm32l4xx_hal_crc.c
│ │ ├── stm32l4xx_hal_crc_ex.c
│ │ ├── stm32l4xx_hal_dma.c
│ │ ├── stm32l4xx_hal_dma_ex.c
│ │ ├── stm32l4xx_hal_flash.c
│ │ ├── stm32l4xx_hal_flash_ex.c
│ │ ├── stm32l4xx_hal_flash_ramfunc.c
│ │ ├── stm32l4xx_hal_gpio.c
│ │ ├── stm32l4xx_hal_i2c.c
│ │ ├── stm32l4xx_hal_i2c_ex.c
│ │ ├── stm32l4xx_hal_pwr.c
│ │ ├── stm32l4xx_hal_pwr_ex.c
│ │ ├── stm32l4xx_hal_rcc.c
│ │ ├── stm32l4xx_hal_rcc_ex.c
│ │ ├── stm32l4xx_hal_tim.c
│ │ ├── stm32l4xx_hal_tim_ex.c
│ │ ├── stm32l4xx_hal_uart.c
│ │ └── stm32l4xx_hal_uart_ex.c
│ ├── Inc
│ ├── RTE_Components.h
│ ├── app_x-cube-ai.h
│ ├── bsp_ai.h
│ ├── constants_ai.h
│ ├── main.h
│ ├── stm32l4xx_hal_conf.h
│ └── stm32l4xx_it.h
│ ├── Makefile
│ ├── Middlewares
│ └── ST
│ │ ├── AI
│ │ └── AI
│ │ │ ├── data
│ │ │ ├── network_data.c
│ │ │ └── network_data.h
│ │ │ ├── include
│ │ │ ├── ai_common_config.h
│ │ │ ├── ai_datatypes_defines.h
│ │ │ ├── ai_datatypes_format.h
│ │ │ ├── ai_datatypes_internal.h
│ │ │ ├── ai_log.h
│ │ │ ├── ai_math_helpers.h
│ │ │ ├── ai_memory_manager.h
│ │ │ ├── ai_network_inspector.h
│ │ │ ├── ai_platform.h
│ │ │ ├── ai_platform_interface.h
│ │ │ ├── core_common.h
│ │ │ ├── core_conv2d_kernels_float.h
│ │ │ ├── core_datatypes.h
│ │ │ ├── core_lists.h
│ │ │ ├── core_log.h
│ │ │ ├── core_net_inspect.h
│ │ │ ├── core_net_inspect_interface.h
│ │ │ ├── core_utils.h
│ │ │ ├── datatypes_network.h
│ │ │ ├── formats_list.h
│ │ │ ├── layers.h
│ │ │ ├── layers_common.h
│ │ │ ├── layers_conv2d.h
│ │ │ ├── layers_cycles_estimation.h
│ │ │ ├── layers_generic.h
│ │ │ ├── layers_lambda.h
│ │ │ ├── layers_list.h
│ │ │ ├── layers_nl.h
│ │ │ ├── layers_norm.h
│ │ │ ├── layers_override.h
│ │ │ ├── layers_pool.h
│ │ │ ├── layers_rnn.h
│ │ │ ├── network.h
│ │ │ └── platform_config.h
│ │ │ ├── lib
│ │ │ └── network_runtime.a
│ │ │ └── src
│ │ │ └── network.c
│ │ └── Application
│ │ └── SystemPerformance
│ │ ├── Inc
│ │ └── aiSystemPerformance.h
│ │ └── Src
│ │ └── aiSystemPerformance.c
│ ├── STM32L476RGTx_FLASH.ld
│ ├── Src
│ ├── app_x-cube-ai.c
│ ├── main.c
│ ├── stm32l4xx_hal_msp.c
│ ├── stm32l4xx_it.c
│ └── system_stm32l4xx.c
│ ├── ai34-systemperformance.ioc
│ ├── flash.sh
│ ├── readperf.py
│ └── startup_stm32l476xx.s
├── jobs.py
├── microesc
├── __init__.py
├── common.py
├── datasets
│ └── UrbanSound8K.csv
├── features.py
├── jobs.py
├── livedemo.py
├── models
│ ├── __init__.py
│ ├── crnn.py
│ ├── dcnn.py
│ ├── dense.py
│ ├── dilated.py
│ ├── dmix.py
│ ├── effnet.py
│ ├── ldcnn.py
│ ├── mobilenet.py
│ ├── piczakcnn.py
│ ├── sbcnn.py
│ ├── skm.py
│ ├── speech.py
│ ├── squeezenet.py
│ └── strided.py
├── perf.py
├── preprocess.py
├── report.py
├── settings.py
├── stats.py
├── stm32convert.py
├── test.py
├── train.py
├── urbansound8k.py
└── variations.py
├── model.py
├── models.csv
├── models16k.csv
├── plan.html
├── preprocess.py
├── presentation
├── Makefile
├── img
│ ├── cover.png
│ ├── cover.svg
│ ├── cpu-efficiency.png
│ ├── fail-dropout.png
│ ├── fail-truncation.png
│ ├── models-list.png
│ ├── nmbu_logo_eng_rgb.jpg
│ ├── nmbu_logo_eng_rgb_trans.png
│ ├── noise-monitoring.jpg
│ ├── results.png
│ ├── soundsensing-logo.png
│ ├── soundsensing-logo.xcf
│ ├── stoykart.png
│ ├── training-settings.png
│ ├── unknown-class-tradeoffs.png
│ ├── unknown-class.png
│ └── xcubeai.png
├── notes.md
├── presentation.md
├── reveal.js
│ ├── css
│ │ ├── print
│ │ │ ├── paper.css
│ │ │ └── pdf.css
│ │ ├── reveal.css
│ │ ├── reveal.scss
│ │ └── theme
│ │ │ ├── README.md
│ │ │ ├── beige.css
│ │ │ ├── black.css
│ │ │ ├── blood.css
│ │ │ ├── league.css
│ │ │ ├── moon.css
│ │ │ ├── night.css
│ │ │ ├── serif.css
│ │ │ ├── simple.css
│ │ │ ├── sky.css
│ │ │ ├── solarized.css
│ │ │ ├── source
│ │ │ ├── beige.scss
│ │ │ ├── black.scss
│ │ │ ├── blood.scss
│ │ │ ├── league.scss
│ │ │ ├── moon.scss
│ │ │ ├── night.scss
│ │ │ ├── serif.scss
│ │ │ ├── simple.scss
│ │ │ ├── sky.scss
│ │ │ ├── solarized.scss
│ │ │ └── white.scss
│ │ │ ├── template
│ │ │ ├── mixins.scss
│ │ │ ├── settings.scss
│ │ │ └── theme.scss
│ │ │ └── white.css
│ ├── js
│ │ └── reveal.js
│ ├── lib
│ │ ├── css
│ │ │ └── zenburn.css
│ │ ├── font
│ │ │ ├── league-gothic
│ │ │ │ ├── LICENSE
│ │ │ │ ├── league-gothic.css
│ │ │ │ ├── league-gothic.eot
│ │ │ │ ├── league-gothic.ttf
│ │ │ │ └── league-gothic.woff
│ │ │ └── source-sans-pro
│ │ │ │ ├── LICENSE
│ │ │ │ ├── source-sans-pro-italic.eot
│ │ │ │ ├── source-sans-pro-italic.ttf
│ │ │ │ ├── source-sans-pro-italic.woff
│ │ │ │ ├── source-sans-pro-regular.eot
│ │ │ │ ├── source-sans-pro-regular.ttf
│ │ │ │ ├── source-sans-pro-regular.woff
│ │ │ │ ├── source-sans-pro-semibold.eot
│ │ │ │ ├── source-sans-pro-semibold.ttf
│ │ │ │ ├── source-sans-pro-semibold.woff
│ │ │ │ ├── source-sans-pro-semibolditalic.eot
│ │ │ │ ├── source-sans-pro-semibolditalic.ttf
│ │ │ │ ├── source-sans-pro-semibolditalic.woff
│ │ │ │ └── source-sans-pro.css
│ │ └── js
│ │ │ ├── classList.js
│ │ │ ├── head.min.js
│ │ │ └── html5shiv.js
│ └── plugin
│ │ ├── highlight
│ │ └── highlight.js
│ │ ├── markdown
│ │ ├── example.html
│ │ ├── example.md
│ │ ├── markdown.js
│ │ └── marked.js
│ │ ├── math
│ │ └── math.js
│ │ ├── multiplex
│ │ ├── client.js
│ │ ├── index.js
│ │ ├── master.js
│ │ └── package.json
│ │ ├── notes-server
│ │ ├── client.js
│ │ ├── index.js
│ │ └── notes.html
│ │ ├── notes
│ │ ├── notes.html
│ │ └── notes.js
│ │ ├── print-pdf
│ │ └── print-pdf.js
│ │ ├── search
│ │ └── search.js
│ │ └── zoom-js
│ │ └── zoom.js
└── style.css
├── report.py
├── report
├── 163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav
├── Makefile
├── Plots.ipynb
├── abstract.latex
├── cover.latex
├── cover.odt
├── cover.pdf
├── end.latex
├── end.pdf
├── ieee.csl
├── img
│ ├── CMSIS-NN-functions.png
│ ├── SONYC-CPS.png
│ ├── ST-Orlando-SoC.png
│ ├── STM32F103VGT6-LD.jpg
│ ├── activation-functions.png
│ ├── activation-functions.svg
│ ├── analysis-windows.png
│ ├── analysis-windows.svg
│ ├── artificial-neuron.png
│ ├── artificial-neuron.svg
│ ├── audio-aquisition.png
│ ├── audio-aquisition.svg
│ ├── classification-pipeline.png
│ ├── classification-pipeline.svg
│ ├── conv-block-effnet.png
│ ├── conv-block-mobilenet.png
│ ├── conv-block-shufflenet.png
│ ├── conv-blocks-imagenets.png
│ ├── conv-blocks-imagenets.svg
│ ├── conv-depthwise-separable.png
│ ├── conv-grouped-1x1-g3.png
│ ├── conv-grouped-3x3-g3.png
│ ├── conv-standard.png
│ ├── convolution-2d.png
│ ├── convolution-2d.svg
│ ├── crossvalidation.png
│ ├── crossvalidation.svg
│ ├── demo-tightcrop.jpg
│ ├── depthwise-separable-convolution.png
│ ├── depthwise-separable-convolution.svg
│ ├── envnet.png
│ ├── frontpage.png
│ ├── lenet5.png
│ ├── maxpooling.png
│ ├── maxpooling.svg
│ ├── models.svg
│ ├── multilayer-perceptron.png
│ ├── multilayer-perceptron.svg
│ ├── noiseseverity.png
│ ├── noiseseverity.svg
│ ├── piczak-cnn.png
│ ├── results.csv
│ ├── sensornetworks.png
│ ├── sensornetworks.svg
│ ├── sensortile-annotated.jpg
│ ├── sensortile-annotated.svg
│ ├── sensortile-devkit.jpg
│ ├── spatially-separable-convolution.png
│ ├── spatially-separable-convolution.svg
│ ├── spectrograms.svg
│ ├── stm32cubeai.png
│ ├── strided-convolution.png
│ ├── strided-convolution.svg
│ ├── training-inference.png
│ └── training-inference.svg
├── microcontrollers.csv
├── no-figure-floats.tex
├── plot.py
├── plots
│ ├── urbansound8k-examples.png
│ ├── urbansound8k-existing-models-logmel.png
│ └── urbansound8k-existing-models-logmel.tex
├── preamble.tex
├── pyincludes
│ ├── experiment-settings.py
│ ├── experiment-settings.tex
│ ├── microcontrollers.py
│ ├── microcontrollers.tex
│ ├── models.py
│ ├── models.tex
│ ├── results.py
│ ├── results.tex
│ ├── urbansound8k-classes.py
│ └── urbansound8k-classes.tex
├── pyplots
│ ├── dataaugmentations.png
│ ├── dataaugmentations.py
│ ├── filterbanks.png
│ ├── filterbanks.py
│ ├── logloss.png
│ └── logloss.py
├── references.bib
├── references.md
├── report.md
├── results
│ ├── confusion_test.png
│ ├── grouped_confusion_test_foreground.png
│ ├── models_accuracy.png
│ ├── models_efficiency.png
│ └── results.csv
├── sections.py
├── short-captions.lua
├── status.md
├── summary.md
└── urbansound8k-existing.csv
├── requirements.txt
├── run.py
├── test.py
├── test
├── test_models.py
├── test_training.py
└── test_urbansound.py
├── thesis.planner
└── train.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | EdgeML/
2 | scratch/
3 | img/
4 | experiments/speechcommands/
5 | data/
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .~lock*
2 | *.pyc
3 | report/merged.pdf
4 | report/report.pdf
5 | report/summary.pdf
6 | report/status.pdf
7 | /data/
8 | !data/results
9 | .ipynb_checkpoints/
10 | _minted-input/
11 | /test/data
12 | /test/out
13 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | dist: xenial
2 | language: python
3 | python:
4 | - "3.6"
5 | before_install:
6 | - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
7 | - bash miniconda.sh -b -p $HOME/miniconda
8 | - source "$HOME/miniconda/etc/profile.d/conda.sh"
9 | - hash -r
10 | - conda config --set always_yes yes --set changeps1 no
11 | - conda update -q conda
12 | - conda info -a
13 | install:
14 | - conda env create -f environment.yml
15 | - conda activate microesc
16 | - wget https://github.com/jgm/pandoc/releases/download/2.7.1/pandoc-2.7.1-linux.tar.gz
17 | - tar -xf pandoc-2.7.1-linux.tar.gz
18 | - export PATH=`pwd`/pandoc-2.7.1/bin/:$PATH
19 | script:
20 | - which pandoc
21 | - pandoc -v
22 | - cd report && make
23 | - pdfinfo report.pdf
24 | - cd -
25 | - PYTHONPATH=./ pytest -v --capture=no
26 | - cd firmware/perftest && make
27 | addons:
28 | apt:
29 | packages:
30 | - pandoc
31 | - pandoc-citeproc
32 | - texlive
33 | - texlive-latex-extra
34 | - texlive-science
35 | - python-pygments
36 | - lmodern
37 | - poppler-utils
38 | - ghostscript
39 | - gcc-arm-none-eabi
40 | - libnewlib-arm-none-eabi
41 | - librsvg2-bin
42 |
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 |
2 | ## TODO
3 |
4 | ## After report
5 |
6 | Dissemination
7 |
8 | - Image of overall project/system
9 | - Project image, title page
10 | - Publish on Arxiv? cs.LG cs.SD eess.AS stat.ML
11 | - Write a blogpost
12 |
13 | Related
14 |
15 | - STM32AI: Report melspec preprocessing bug
16 | https://community.st.com/s/topic/0TO0X0000003iUqWAI/stm32-machine-learning-ai
17 |
18 | Experiment
19 |
20 | - Test 16kHz with 30 mels
21 | - Use multi-instance learning. Get bigger batches and improve GPU utilization
22 | - Do hyperparameter optimization per model
23 | - Double-check reproduction of SB-CNN results
24 | - Improve Data Augmentation
25 |
26 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/0.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/0.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/0.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 10185806, "duration_avg": 0.9717960000000001, "cycles_avg": 77743690, "stack": 388, "cycles_macc": 7.632551611526864}
2 |
--------------------------------------------------------------------------------
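The *.device.json files hold on-device benchmark figures: clock frequency (cpu_mhz), multiply-accumulate count (macc), average inference duration and cycle count, stack usage, and the derived cycles-per-MACC ratio. A minimal sketch for re-deriving those last two fields from the raw numbers (the helper below is illustrative only, not part of the repository):

import json

def check_device_stats(path):
    # Recompute the derived fields in a *.device.json benchmark file
    with open(path) as f:
        stats = json.load(f)
    # cycles per multiply-accumulate: total cycles divided by MACC count
    cycles_macc = stats["cycles_avg"] / stats["macc"]
    # average duration in seconds at the reported clock frequency
    duration = stats["cycles_avg"] / (stats["cpu_mhz"] * 1e6)
    print(f"cycles/MACC={cycles_macc:.2f} duration={duration:.4f}s")

check_device_stats("data/results/20190511-2353-0e60/0.device.json")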
/data/results/20190511-2353-0e60/0.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,conv2d_4,Conv2D,"(60, 31, 1)","(60, 31, 24)",1860,44640,7200,624
3 | 3,activation_6,Activation,"(20, 15, 24)","(20, 15, 24)",7200,7200,7200,
4 | 4,conv2d_5,Conv2D,"(20, 15, 24)","(20, 15, 48)",7200,14400,2016,28848
5 | 7,activation_7,Activation,"(6, 7, 48)","(6, 7, 48)",2016,2016,2016,
6 | 8,conv2d_6,Conv2D,"(6, 7, 48)","(2, 3, 48)",2016,288,576,57648
7 | 12,dense_3,Dense,"(288,)","(64,)",288,64,64,18496
8 | 13,activation_9,Activation,"(64,)","(64,)",64,64,64,
9 | 15,dense_4,Dense,"(64,)","(10,)",64,10,10,650
10 | 17,activation_10,Activation,"(10,)","(10,)",10,10,10,
11 |
--------------------------------------------------------------------------------
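The *.layers.csv files list, per layer, the input/output shapes plus activation and weight memory reported by the conversion step. The weight counts match the standard Keras parameter formulas; a short illustrative check (the 5x5 kernel size is inferred from the counts above, it is not stated in the file):

def conv2d_params(kh, kw, ch_in, ch_out):
    # one bias per output channel
    return ch_out * (kh * kw * ch_in + 1)

def dense_params(n_in, n_out):
    return n_in * n_out + n_out

# conv2d_4: (60, 31, 1) -> (60, 31, 24), 624 weights
assert conv2d_params(5, 5, 1, 24) == 624
# conv2d_5: (20, 15, 24) -> (20, 15, 48), 28848 weights
assert conv2d_params(5, 5, 24, 48) == 28848
# dense_3: 288 -> 64, 18496 weights; dense_4: 64 -> 10, 650 weights
assert dense_params(288, 64) == 18496
assert dense_params(64, 10) == 650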
/data/results/20190511-2353-0e60/1.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/1.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/1.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 1567280, "duration_avg": 0.244592, "cycles_avg": 19567373, "stack": 388, "cycles_macc": 12.484924837935786}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/1.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,conv2d_2,Conv2D,"(60, 31, 1)","(60, 31, 24)",1860,44640,7200,624
3 | 3,activation_6,Activation,"(20, 15, 24)","(20, 15, 24)",7200,7200,7200,
4 | 4,separable_conv2d_3,SeparableConv2D,"(20, 15, 24)","(20, 15, 48)",7200,14400,7200,624
5 | 7,activation_7,Activation,"(6, 7, 48)","(6, 7, 48)",2016,2016,2016,
6 | 8,separable_conv2d_4,SeparableConv2D,"(6, 7, 48)","(2, 3, 48)",2016,288,288,1248
7 | 12,dense_3,Dense,"(288,)","(64,)",288,64,64,18496
8 | 13,activation_9,Activation,"(64,)","(64,)",64,64,64,
9 | 15,dense_4,Dense,"(64,)","(10,)",64,10,10,650
10 | 17,activation_10,Activation,"(10,)","(10,)",10,10,10,
11 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/2.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/2.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/2.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 2980798, "duration_avg": 0.325142, "cycles_avg": 26011364, "stack": 276, "cycles_macc": 8.726308860915768}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/2.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 22)",1860,10560,10560,572
4 | 4,conv1,Conv2D,"(30, 16, 22)","(15, 8, 33)",10560,3960,3960,18183
5 | 7,conv2,Conv2D,"(15, 8, 33)","(6, 2, 49)",3960,588,1176,40474
6 | 11,dense_3,Dense,"(588,)","(64,)",588,64,64,37696
7 | 12,activation_3,Activation,"(64,)","(64,)",64,64,64,
8 | 14,dense_4,Dense,"(64,)","(10,)",64,10,10,650
9 | 16,activation_4,Activation,"(10,)","(10,)",10,10,10,
10 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/3.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/3.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/3.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 477236, "duration_avg": 0.081599, "cycles_avg": 6527943, "stack": 292, "cycles_macc": 13.678647461633238}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/3.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 24)",1860,11520,11520,624
4 | 4,conv1_ds,SeparableConv2D,"(30, 16, 24)","(15, 8, 36)",11520,4320,2880,
5 | 7,conv2_ds,SeparableConv2D,"(15, 8, 36)","(6, 2, 54)",4320,648,432,
6 | 11,dense_3,Dense,"(648,)","(64,)",648,64,64,41536
7 | 12,activation_3,Activation,"(64,)","(64,)",64,64,64,
8 | 14,dense_4,Dense,"(64,)","(10,)",64,10,10,650
9 | 16,activation_4,Activation,"(10,)","(10,)",10,10,10,
10 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/4.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/4.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/4.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 468649, "duration_avg": 0.073457, "cycles_avg": 5876568, "stack": 292, "cycles_macc": 12.539380218457737}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/4.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 22)",1860,10560,10560,572
4 | 4,conv1pw,Conv2D,"(30, 16, 22)","(15, 8, 16)",10560,1920,1920,16
5 | 7,conv1dwv,DepthwiseConv2D,"(15, 8, 16)","(15, 8, 16)",1920,1920,1920,96
6 | 10,conv1dwh,DepthwiseConv2D,"(15, 8, 16)","(15, 8, 16)",1920,1920,1920,16
7 | 13,conv1rh,Conv2D,"(15, 8, 16)","(15, 8, 33)",1920,3960,3960,33
8 | 17,conv2pw,Conv2D,"(17, 10, 33)","(9, 5, 24)",5610,1080,1080,24
9 | 20,conv2dwv,DepthwiseConv2D,"(9, 5, 24)","(9, 1, 24)",1080,216,216,144
10 | 23,conv2dwh,DepthwiseConv2D,"(9, 1, 24)","(9, 1, 24)",216,216,216,24
11 | 26,conv2rh,Conv2D,"(9, 1, 24)","(9, 1, 49)",216,441,882,49
12 | 30,dense_3,Dense,"(441,)","(64,)",441,64,64,28288
13 | 31,activation_3,Activation,"(64,)","(64,)",64,64,64,
14 | 33,dense_4,Dense,"(64,)","(10,)",64,10,10,650
15 | 35,activation_4,Activation,"(10,)","(10,)",10,10,10,
16 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/5.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/5.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/5.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 318497, "duration_avg": 0.059537999999999994, "cycles_avg": 4763083, "stack": 292, "cycles_macc": 14.954875556127687}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/5.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 24)",1860,11520,11520,240
4 | 4,conv1_ds,SeparableConv2D,"(30, 16, 24)","(15, 8, 36)",11520,4320,2880,
5 | 7,conv2_ds,SeparableConv2D,"(15, 8, 36)","(8, 4, 54)",4320,1728,1152,
6 | 10,conv3_ds,SeparableConv2D,"(8, 4, 54)","(3, 1, 81)",1728,243,162,
7 | 14,dense_3,Dense,"(243,)","(64,)",243,64,64,15616
8 | 15,activation_3,Activation,"(64,)","(64,)",64,64,64,
9 | 17,dense_4,Dense,"(64,)","(10,)",64,10,10,650
10 | 19,activation_4,Activation,"(10,)","(10,)",10,10,10,
11 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/6.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/6.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/6.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 445688, "duration_avg": 0.071148, "cycles_avg": 5691859, "stack": 292, "cycles_macc": 12.77094963292707}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/6.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 22)",1860,10560,10560,572
4 | 4,conv1_pw,Conv2D,"(30, 16, 22)","(15, 8, 16)",10560,1920,1920,16
5 | 5,conv1_ds,SeparableConv2D,"(15, 8, 16)","(15, 8, 33)",1920,3960,1920,
6 | 9,conv2_pw,Conv2D,"(17, 10, 33)","(9, 5, 24)",5610,1080,1080,24
7 | 10,conv2_ds,SeparableConv2D,"(9, 5, 24)","(5, 1, 49)",1080,245,120,
8 | 14,dense_3,Dense,"(245,)","(64,)",245,64,64,15744
9 | 15,activation_3,Activation,"(64,)","(64,)",64,64,64,
10 | 17,dense_4,Dense,"(64,)","(10,)",64,10,10,650
11 | 19,activation_4,Activation,"(10,)","(10,)",10,10,10,
12 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/7.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/7.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/7.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 380749, "duration_avg": 0.06633, "cycles_avg": 5306424, "stack": 292, "cycles_macc": 13.936803510974421}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/7.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 20)",1860,9600,9600,520
4 | 4,conv1_ds,SeparableConv2D,"(30, 16, 20)","(15, 8, 30)",9600,3600,2400,
5 | 7,conv2_ds,SeparableConv2D,"(15, 8, 30)","(6, 2, 45)",3600,540,360,
6 | 11,dense_3,Dense,"(540,)","(64,)",540,64,64,34624
7 | 12,activation_3,Activation,"(64,)","(64,)",64,64,64,
8 | 14,dense_4,Dense,"(64,)","(10,)",64,10,10,650
9 | 16,activation_4,Activation,"(10,)","(10,)",10,10,10,
10 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/8.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/8.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/8.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 291318, "duration_avg": 0.051713999999999996, "cycles_avg": 4137172, "stack": 292, "cycles_macc": 14.201566672845482}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/8.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 16)",1860,7680,7680,416
4 | 4,conv1_ds,SeparableConv2D,"(30, 16, 16)","(15, 8, 24)",7680,2880,1920,
5 | 7,conv2_ds,SeparableConv2D,"(15, 8, 24)","(6, 2, 36)",2880,432,288,
6 | 11,dense_3,Dense,"(432,)","(64,)",432,64,64,27712
7 | 12,activation_3,Activation,"(64,)","(64,)",64,64,64,
8 | 14,dense_4,Dense,"(64,)","(10,)",64,10,10,650
9 | 16,activation_4,Activation,"(10,)","(10,)",10,10,10,
10 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/9.confusion.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/data/results/20190511-2353-0e60/9.confusion.npz
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/9.device.json:
--------------------------------------------------------------------------------
1 | {"cpu_mhz": 80, "macc": 208943, "duration_avg": 0.038006, "cycles_avg": 3040495, "stack": 292, "cycles_macc": 14.551791636953618}
2 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/9.layers.csv:
--------------------------------------------------------------------------------
1 | ,name,type,shape_in,shape_out,size_in,size_out,activations,weights
2 | 0,input_2,InputLayer,"(60, 31, 1)","(60, 31, 1)",1860,1860,1860,
3 | 1,conv0,Conv2D,"(60, 31, 1)","(30, 16, 12)",1860,5760,5760,312
4 | 4,conv1_ds,SeparableConv2D,"(30, 16, 12)","(15, 8, 18)",5760,2160,1440,
5 | 7,conv2_ds,SeparableConv2D,"(15, 8, 18)","(6, 2, 27)",2160,324,216,
6 | 11,dense_3,Dense,"(324,)","(64,)",324,64,64,20800
7 | 12,activation_3,Activation,"(64,)","(64,)",64,64,64,
8 | 14,dense_4,Dense,"(64,)","(10,)",64,10,10,650
9 | 16,activation_4,Activation,"(10,)","(10,)",10,10,10,
10 |
--------------------------------------------------------------------------------
/data/results/20190511-2353-0e60/stm32stats.csv:
--------------------------------------------------------------------------------
1 | experiment,maccs_frame,flash_usage,ram_usage_max,ram_usage_min
2 | 0,10185806.0,415100.0,36290.0,36290.0
3 | 1,1567280.0,98410.0,56350.0,56350.0
4 | 2,2980798.0,381150.0,56720.0,56720.0
5 | 3,477236.0,184640.0,56250.0,56250.0
6 | 4,468649.0,128750.0,48750.0,48750.0
7 | 5,318497.0,97650.0,56250.0,56250.0
8 | 6,445688.0,81940.0,48750.0,48750.0
9 | 7,380749.0,152810.0,46880.0,46880.0
10 | 8,291318.0,121590.0,37500.0,37500.0
11 | 9,208943.0,90970.0,28130.0,28130.0
12 |
--------------------------------------------------------------------------------
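stm32stats.csv summarizes, per experiment, the MACCs per analysis frame and the flash/RAM footprint reported by the X-CUBE-AI conversion. A small pandas sketch (column names taken from the header above; the 1 MB flash and 128 kB SRAM limits are those of the STM32L476RG target) for checking which models fit on the device:

import pandas as pd

FLASH_LIMIT = 1024 * 1024   # STM32L476RG: 1 MB flash
RAM_LIMIT = 128 * 1024      # STM32L476RG: 128 kB SRAM

stats = pd.read_csv("data/results/20190511-2353-0e60/stm32stats.csv")
stats["fits"] = (stats.flash_usage < FLASH_LIMIT) & (stats.ram_usage_max < RAM_LIMIT)
print(stats[["experiment", "maccs_frame", "flash_usage", "ram_usage_max", "fits"]])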
/environment.yml:
--------------------------------------------------------------------------------
1 | name: microesc
2 | dependencies:
3 | - python=3.6.*
4 | - pip=19.0.*
5 | - numpy=1.17.*
6 | - matplotlib=3.1.*
7 | - pandas>=0.23.0
8 | - scipy>=1.3.0
9 | - seaborn>=0.9.0
10 | - scikit-learn>=0.21.1
11 | - jupyterlab>=1.1.4
12 | - pip:
13 | - soundfile>=0.10
14 | - librosa>=0.7
15 | - pytest>=5.1.3
16 | - tensorflow==1.15
17 | - keras==2.2.5
18 | - h5py==2.10.0
19 |
--------------------------------------------------------------------------------
/experiments/16k30_256hop.yaml:
--------------------------------------------------------------------------------
1 | samplerate: 16000
2 | n_mels: 30
3 | fmin: 0
4 | fmax: 8000
5 | n_fft: 512
6 | hop_length: 256
7 | augmentations: 12
8 | augment: 1
9 | frames: 62
10 | batch: 400
11 | epochs: 50
12 | train_samples: 30000
13 | val_samples: 5000
14 | learning_rate: 0.005
15 | voting: 'mean'
16 | voting_overlap: 0.0
17 |
--------------------------------------------------------------------------------
/experiments/16k30_512hop.yaml:
--------------------------------------------------------------------------------
1 | samplerate: 16000
2 | n_mels: 30
3 | fmin: 0
4 | fmax: 8000
5 | n_fft: 1024
6 | hop_length: 512
7 | augmentations: 12
8 | augment: 1
9 | frames: 31
10 | batch: 400
11 | epochs: 50
12 | train_samples: 30000
13 | val_samples: 5000
14 | learning_rate: 0.005
15 | voting: 'mean'
16 | voting_overlap: 0.0
17 | normalize: 'max'
18 |
--------------------------------------------------------------------------------
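The two 16 kHz experiment files above pair 62 frames with a 256-sample hop and 31 frames with a 512-sample hop, so both cover roughly one second of audio per analysis window (ignoring FFT edge effects). A quick arithmetic check, illustrative only:

def window_seconds(frames, hop_length, samplerate):
    # approximate duration of one analysis window in seconds
    return frames * hop_length / samplerate

print(window_seconds(62, 256, 16000))   # ~0.99 s for 16k30_256hop
print(window_seconds(31, 512, 16000))   # ~0.99 s for 16k30_512hop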
/experiments/baselines.csv:
--------------------------------------------------------------------------------
1 | name,input,accuracy,parameters,macc,reference
2 | SBCNN,logmel,0.72,56700,,
3 | SKM,logmel,0.72,2100000,,
4 | PiczakCNN,logmel,0.72,25000000,,
5 | Dilated,logmel,0.78,,,
6 | M18,audio,0.71,3700000,,
7 | M5-big,audio,0.63,2200000,,
8 | EnvNet2,audio,0.691,,,
9 | EnvNet,audio,0.64,,,
10 | MC-DCNN,audio,0.73,,,
11 | WSN,audio,0.705,520000,,
12 |
--------------------------------------------------------------------------------
/experiments/ldcnn20k60.yaml:
--------------------------------------------------------------------------------
1 | samplerate: 22050
2 | n_mels: 60
3 | fmin: 0
4 | fmax: 11025
5 | n_fft: 1024
6 | hop_length: 512
7 | augmentations: 12
8 | augment: 1
9 | frames: 31
10 | batch: 400
11 | epochs: 100
12 | train_samples: 30000
13 | val_samples: 5000
14 | learning_rate: 0.005
15 | voting: 'mean'
16 | voting_overlap: 0.0
17 | nesterov_momentum: 0.9
18 | fully_connected: 32
19 |
--------------------------------------------------------------------------------
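ldcnn20k60.yaml uses 60 mel bands at 22.05 kHz with a 1024-point FFT and a 512-sample hop. A hedged sketch of how such settings map onto a librosa mel-spectrogram call (illustrative only; the repository's actual preprocessing lives in microesc/preprocess.py and microesc/features.py):

import numpy as np
import librosa

def logmel(path, settings):
    # load at the experiment sample rate and compute a log-scaled mel spectrogram
    y, sr = librosa.load(path, sr=settings["samplerate"])
    mels = librosa.feature.melspectrogram(
        y=y, sr=sr,
        n_mels=settings["n_mels"],
        n_fft=settings["n_fft"],
        hop_length=settings["hop_length"],
        fmin=settings["fmin"],
        fmax=settings["fmax"],
    )
    return librosa.power_to_db(mels, ref=np.max)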
/firmware/perftest/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 |
--------------------------------------------------------------------------------
/firmware/perftest/Drivers/CMSIS/Device/ST/STM32L4xx/Include/stm32l476xx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/firmware/perftest/Drivers/CMSIS/Device/ST/STM32L4xx/Include/stm32l476xx.h
--------------------------------------------------------------------------------
/firmware/perftest/Drivers/CMSIS/Device/ST/STM32L4xx/Include/stm32l4xx.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/firmware/perftest/Drivers/CMSIS/Device/ST/STM32L4xx/Include/stm32l4xx.h
--------------------------------------------------------------------------------
/firmware/perftest/Drivers/STM32L4xx_HAL_Driver/Inc/stm32l4xx_hal_flash_ramfunc.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file stm32l4xx_hal_flash_ramfunc.h
4 | * @author MCD Application Team
5 | * @brief Header file of FLASH RAMFUNC driver.
6 | ******************************************************************************
7 | * @attention
8 | *
9 |   * © COPYRIGHT(c) 2017 STMicroelectronics
10 | *
11 | * Redistribution and use in source and binary forms, with or without modification,
12 | * are permitted provided that the following conditions are met:
13 | * 1. Redistributions of source code must retain the above copyright notice,
14 | * this list of conditions and the following disclaimer.
15 | * 2. Redistributions in binary form must reproduce the above copyright notice,
16 | * this list of conditions and the following disclaimer in the documentation
17 | * and/or other materials provided with the distribution.
18 | * 3. Neither the name of STMicroelectronics nor the names of its contributors
19 | * may be used to endorse or promote products derived from this software
20 | * without specific prior written permission.
21 | *
22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 | *
33 | ******************************************************************************
34 | */
35 |
36 | /* Define to prevent recursive inclusion -------------------------------------*/
37 | #ifndef __STM32L4xx_FLASH_RAMFUNC_H
38 | #define __STM32L4xx_FLASH_RAMFUNC_H
39 |
40 | #ifdef __cplusplus
41 | extern "C" {
42 | #endif
43 |
44 | /* Includes ------------------------------------------------------------------*/
45 | #include "stm32l4xx_hal_def.h"
46 |
47 | /** @addtogroup STM32L4xx_HAL_Driver
48 | * @{
49 | */
50 |
51 | /** @addtogroup FLASH_RAMFUNC
52 | * @{
53 | */
54 |
55 | /* Exported types ------------------------------------------------------------*/
56 | /* Exported macro ------------------------------------------------------------*/
57 | /* Exported functions --------------------------------------------------------*/
58 | /** @addtogroup FLASH_RAMFUNC_Exported_Functions
59 | * @{
60 | */
61 |
62 | /** @addtogroup FLASH_RAMFUNC_Exported_Functions_Group1
63 | * @{
64 | */
65 | /* Peripheral Control functions ************************************************/
66 | __RAM_FUNC HAL_FLASHEx_EnableRunPowerDown(void);
67 | __RAM_FUNC HAL_FLASHEx_DisableRunPowerDown(void);
68 | #if defined (STM32L4R5xx) || defined (STM32L4R7xx) || defined (STM32L4R9xx) || defined (STM32L4S5xx) || defined (STM32L4S7xx) || defined (STM32L4S9xx)
69 | __RAM_FUNC HAL_FLASHEx_OB_DBankConfig(uint32_t DBankConfig);
70 | #endif
71 | /**
72 | * @}
73 | */
74 |
75 | /**
76 | * @}
77 | */
78 |
79 | /**
80 | * @}
81 | */
82 |
83 | /**
84 | * @}
85 | */
86 |
87 | #ifdef __cplusplus
88 | }
89 | #endif
90 |
91 | #endif /* __STM32L4xx_FLASH_RAMFUNC_H */
92 |
93 | /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
94 |
--------------------------------------------------------------------------------
/firmware/perftest/Inc/RTE_Components.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file
4 | * @author MCD Application Team
5 | * @version V2.0.0
6 | ******************************************************************************
7 | * @attention
8 | *
9 | * COPYRIGHT(c) 2019 STMicroelectronics
10 | *
11 | * Redistribution and use in source and binary forms, with or without modification,
12 | * are permitted provided that the following conditions are met:
13 | * 1. Redistributions of source code must retain the above copyright notice,
14 | * this list of conditions and the following disclaimer.
15 | * 2. Redistributions in binary form must reproduce the above copyright notice,
16 | * this list of conditions and the following disclaimer in the documentation
17 | * and/or other materials provided with the distribution.
18 | * 3. Neither the name of STMicroelectronics nor the names of its contributors
19 | * may be used to endorse or promote products derived from this software
20 | * without specific prior written permission.
21 | *
22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 | *
33 | */
34 | /* Define to prevent recursive inclusion -------------------------------------*/
35 | #ifndef __RTE_COMPONENTS_H__
36 | #define __RTE_COMPONENTS_H__
37 |
38 | /* Defines ------------------------------------------------------------------*/
39 | #define AI_SYSTEM_PERFORMANCE
40 |
41 | #endif /* __RTE_COMPONENTS_H__ */
42 |
--------------------------------------------------------------------------------
/firmware/perftest/Inc/bsp_ai.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * File Name : bsp.h
4 | * Description :
5 | ******************************************************************************
6 | */
7 |
8 | /* Define to prevent recursive inclusion -------------------------------------*/
9 | #ifndef BSP_H
10 | #define BSP_H
11 | #ifdef __cplusplus
12 | extern "C" {
13 | #endif
14 | /**
15 | ******************************************************************************
16 | * @file : bsp.h
17 | * @brief : Link to board resources
18 | ******************************************************************************
19 | * This notice applies to any and all portions of this file
20 | * that are not between comment pairs USER CODE BEGIN and
21 | * USER CODE END. Other portions of this file, whether
22 | * inserted by the user or by software development tools
23 | * are owned by their respective copyright owners.
24 | *
25 | * Copyright (c) 2018 STMicroelectronics International N.V.
26 | * All rights reserved.
27 | *
28 | * Redistribution and use in source and binary forms, with or without
29 | * modification, are permitted, provided that the following conditions are met:
30 | *
31 | * 1. Redistribution of source code must retain the above copyright notice,
32 | * this list of conditions and the following disclaimer.
33 | * 2. Redistributions in binary form must reproduce the above copyright notice,
34 | * this list of conditions and the following disclaimer in the documentation
35 | * and/or other materials provided with the distribution.
36 | * 3. Neither the name of STMicroelectronics nor the names of other
37 | * contributors to this software may be used to endorse or promote products
38 | * derived from this software without specific written permission.
39 | * 4. This software, including modifications and/or derivative works of this
40 | * software, must execute solely and exclusively on microcontroller or
41 | * microprocessor devices manufactured by or for STMicroelectronics.
42 | * 5. Redistribution and use of this software other than as permitted under
43 | * this license is void and will automatically terminate your rights under
44 | * this license.
45 | *
46 | * THIS SOFTWARE IS PROVIDED BY STMICROELECTRONICS AND CONTRIBUTORS "AS IS"
47 | * AND ANY EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING, BUT NOT
48 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
49 | * PARTICULAR PURPOSE AND NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY
50 | * RIGHTS ARE DISCLAIMED TO THE FULLEST EXTENT PERMITTED BY LAW. IN NO EVENT
51 | * SHALL STMICROELECTRONICS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
52 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
54 | * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
55 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
56 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
57 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 | *
59 | ******************************************************************************
60 | */
61 | #include "main.h"
62 | #include "stm32l4xx.h"
63 | #include "app_x-cube-ai.h"
64 | #include "constants_ai.h"
65 | #define UartHandle huart2
66 | #define MX_UARTx_Init MX_USART2_UART_Init
67 | #ifdef __cplusplus
68 | }
69 | #endif
70 |
71 | #endif /* BSP_H */
72 |
--------------------------------------------------------------------------------
/firmware/perftest/Inc/constants_ai.h:
--------------------------------------------------------------------------------
1 | /* Define to prevent recursive inclusion -------------------------------------*/
2 | #ifndef __CONSTANTS_H
3 | #define __CONSTANTS_H
4 | #ifdef __cplusplus
5 | extern "C" {
6 | #endif
7 | /**
8 | ******************************************************************************
9 | * @file : constants.h
10 | * @brief : AI constants definitions
11 | ******************************************************************************
12 | * This notice applies to any and all portions of this file
13 | * that are not between comment pairs USER CODE BEGIN and
14 | * USER CODE END. Other portions of this file, whether
15 | * inserted by the user or by software development tools
16 | * are owned by their respective copyright owners.
17 | *
18 | * Copyright (c) 2018 STMicroelectronics International N.V.
19 | * All rights reserved.
20 | *
21 | * Redistribution and use in source and binary forms, with or without
22 | * modification, are permitted, provided that the following conditions are met:
23 | *
24 | * 1. Redistribution of source code must retain the above copyright notice,
25 | * this list of conditions and the following disclaimer.
26 | * 2. Redistributions in binary form must reproduce the above copyright notice,
27 | * this list of conditions and the following disclaimer in the documentation
28 | * and/or other materials provided with the distribution.
29 | * 3. Neither the name of STMicroelectronics nor the names of other
30 | * contributors to this software may be used to endorse or promote products
31 | * derived from this software without specific written permission.
32 | * 4. This software, including modifications and/or derivative works of this
33 | * software, must execute solely and exclusively on microcontroller or
34 | * microprocessor devices manufactured by or for STMicroelectronics.
35 | * 5. Redistribution and use of this software other than as permitted under
36 | * this license is void and will automatically terminate your rights under
37 | * this license.
38 | *
39 | * THIS SOFTWARE IS PROVIDED BY STMICROELECTRONICS AND CONTRIBUTORS "AS IS"
40 | * AND ANY EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING, BUT NOT
41 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
42 | * PARTICULAR PURPOSE AND NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY
43 | * RIGHTS ARE DISCLAIMED TO THE FULLEST EXTENT PERMITTED BY LAW. IN NO EVENT
44 | * SHALL STMICROELECTRONICS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
46 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
47 | * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
48 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
49 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
50 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 | *
52 | ******************************************************************************
53 | */
54 | /* Constants definitions ------------------------------------------------------------------*/
55 | #define MIN_HEAP_SIZE 0x200
56 | #define MIN_STACK_SIZE 0x400
57 | #ifdef __cplusplus
58 | }
59 | #endif
60 |
61 | #endif /*__constants_ai_h_H */
62 |
--------------------------------------------------------------------------------
/firmware/perftest/Inc/stm32l4xx_it.h:
--------------------------------------------------------------------------------
1 | /* USER CODE BEGIN Header */
2 | /**
3 | ******************************************************************************
4 | * @file stm32l4xx_it.h
5 | * @brief This file contains the headers of the interrupt handlers.
6 | ******************************************************************************
7 | *
8 | * COPYRIGHT(c) 2019 STMicroelectronics
9 | *
10 | * Redistribution and use in source and binary forms, with or without modification,
11 | * are permitted provided that the following conditions are met:
12 | * 1. Redistributions of source code must retain the above copyright notice,
13 | * this list of conditions and the following disclaimer.
14 | * 2. Redistributions in binary form must reproduce the above copyright notice,
15 | * this list of conditions and the following disclaimer in the documentation
16 | * and/or other materials provided with the distribution.
17 | * 3. Neither the name of STMicroelectronics nor the names of its contributors
18 | * may be used to endorse or promote products derived from this software
19 | * without specific prior written permission.
20 | *
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ******************************************************************************
33 | */
34 | /* USER CODE END Header */
35 |
36 | /* Define to prevent recursive inclusion -------------------------------------*/
37 | #ifndef __STM32L4xx_IT_H
38 | #define __STM32L4xx_IT_H
39 |
40 | #ifdef __cplusplus
41 | extern "C" {
42 | #endif
43 |
44 | /* Private includes ----------------------------------------------------------*/
45 | /* USER CODE BEGIN Includes */
46 |
47 | /* USER CODE END Includes */
48 |
49 | /* Exported types ------------------------------------------------------------*/
50 | /* USER CODE BEGIN ET */
51 |
52 | /* USER CODE END ET */
53 |
54 | /* Exported constants --------------------------------------------------------*/
55 | /* USER CODE BEGIN EC */
56 |
57 | /* USER CODE END EC */
58 |
59 | /* Exported macro ------------------------------------------------------------*/
60 | /* USER CODE BEGIN EM */
61 |
62 | /* USER CODE END EM */
63 |
64 | /* Exported functions prototypes ---------------------------------------------*/
65 | void NMI_Handler(void);
66 | void HardFault_Handler(void);
67 | void MemManage_Handler(void);
68 | void BusFault_Handler(void);
69 | void UsageFault_Handler(void);
70 | void SVC_Handler(void);
71 | void DebugMon_Handler(void);
72 | void PendSV_Handler(void);
73 | void SysTick_Handler(void);
74 | /* USER CODE BEGIN EFP */
75 |
76 | /* USER CODE END EFP */
77 |
78 | #ifdef __cplusplus
79 | }
80 | #endif
81 |
82 | #endif /* __STM32L4xx_IT_H */
83 |
84 | /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
85 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/data/network_data.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file network_data.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date Fri Apr 5 13:55:54 2019
6 | * @brief AI Tool Automatic Code Generator for Embedded NN computing
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 |
21 | #ifndef __NETWORK_DATA_H_
22 | #define __NETWORK_DATA_H_
23 | #pragma once
24 |
25 | #include "ai_platform.h"
26 |
27 | #define AI_NETWORK_DATA_CONFIG AI_HANDLE_NULL
28 |
29 | #define AI_NETWORK_DATA_ACTIVATIONS_SIZE (37156)
30 |
31 | #define AI_NETWORK_DATA_WEIGHTS_SIZE (425064)
32 |
33 | #define AI_NETWORK_DATA_ACTIVATIONS(ptr_) \
34 | AI_BUFFER_OBJ_INIT( \
35 | AI_BUFFER_FORMAT_U8, \
36 | 1, 1, AI_NETWORK_DATA_ACTIVATIONS_SIZE, 1, \
37 | AI_HANDLE_PTR(ptr_) )
38 |
39 | #define AI_NETWORK_DATA_WEIGHTS(ptr_) \
40 | AI_BUFFER_OBJ_INIT( \
41 | AI_BUFFER_FORMAT_U8|AI_BUFFER_FMT_FLAG_CONST, \
42 | 1, 1, AI_NETWORK_DATA_WEIGHTS_SIZE, 1, \
43 | AI_HANDLE_PTR(ptr_) )
44 |
45 |
46 | AI_API_DECLARE_BEGIN
47 |
48 | /*!
49 | * @brief Get network weights array pointer as a handle ptr.
50 | * @ingroup network_data
51 | * @return a ai_handle pointer to the weights array
52 | */
53 | AI_API_ENTRY
54 | ai_handle ai_network_data_weights_get(void);
55 |
56 |
57 | AI_API_DECLARE_END
58 |
59 | #endif /* __NETWORK_DATA_H_ */
60 |
61 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/ai_common_config.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file ai_common_config.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 18-May-2018
6 | * @brief header file of AI platform common compile configuration defines
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 | #ifndef __AI_COMMON_CONFIG_H_
21 | #define __AI_COMMON_CONFIG_H_
22 | #pragma once
23 |
24 | /*!
25 | * @defgroup layers Layers Compilation Config Definitions
26 | * @brief definition
27 | *
28 | */
29 |
30 | #define HAS_PROFILE_FLOAT
31 | #define HAS_PROFILE_FIXED
32 |
33 |
34 | #endif /*__AI_COMMON_CONFIG_H_*/
35 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/ai_memory_manager.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file ai_memory_manager.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 18-Jun-2018
6 | * @brief AI Library Memory Management Wrappers
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 |
21 | #ifndef __AI_MEMORY_MANAGER_H__
22 | #define __AI_MEMORY_MANAGER_H__
23 | #pragma once
24 |
25 | #include <string.h>   /* memcpy */
26 | #include <stdlib.h>
27 |
28 | #include "ai_datatypes_defines.h"
29 |
30 | /*!
31 | * @section MemoryManager
32 | * @ingroup ai_memory_manager
33 | * Macros to handle memory allocation and management as generic wrappers.
34 | * Dynamic allocations, freeing, clearing and copy are provided.
35 | * @{
36 | */
37 |
38 | #define AI_MEM_ALLOC(size, type) \
39 | ((type*)malloc((size)*sizeof(type)))
40 |
41 | #define AI_MEM_FREE(ptr) \
42 | { free((void*)(ptr)); }
43 |
44 | #define AI_MEM_CLEAR(ptr, size) \
45 | { memset((void*)(ptr), 0, (size)); }
46 |
47 | #define AI_MEM_COPY(dst, src, size) \
48 | { memcpy((void*)(dst), (const void*)(src), (size)); }
49 |
50 | #define AI_MEM_MOVE(dst, src, size) \
51 | { memmove((void*)(dst), (const void*)(src), (size)); }
52 |
53 | /*!
54 | * @brief Copy an array into another.
55 | * @ingroup ai_memory_manager
56 | * @param src the source array handle
57 | * @param dst the destination array handle
58 | * @param size the size in byte of the two arrays
59 | * @return a pointer to the destination buffer
60 | */
61 | AI_DECLARE_STATIC
62 | ai_handle ai_mem_copy_buffer(
63 | ai_handle dst, const ai_handle src, const ai_size byte_size)
64 | {
65 | AI_ASSERT(src && dst && byte_size>0)
66 | AI_MEM_COPY(dst, src, byte_size)
67 |
68 | return dst;
69 | }
70 |
71 | /*! @} */
72 |
73 | #endif /*__AI_MEMORY_MANAGER_H__*/
74 |
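Editor's note: the macros above are thin wrappers over malloc/free/memset/memcpy/memmove. Below is a minimal usage sketch; it is hypothetical and not part of the ST sources (the helper name and the float element type are invented for illustration), and it assumes the AI include directory is on the include path.

    #include "ai_memory_manager.h"

    /* Allocate and zero a scratch buffer using the wrapper macros. */
    static float* alloc_zeroed_scratch(size_t n_elements)
    {
        float* buf = AI_MEM_ALLOC(n_elements, float);       /* malloc(n*sizeof(float)) */
        if (buf) {
            AI_MEM_CLEAR(buf, n_elements * sizeof(float));  /* memset(buf, 0, bytes) */
        }
        return buf;
    }

    /* When the buffer is no longer needed: AI_MEM_FREE(buf); */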
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/core_datatypes.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file core_datatypes.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 22-Aug-2018
6 | * @brief header file of core module private defines and datatypes
7 |  * not exposed to the public API nor to the codegen tool
8 | ******************************************************************************
9 | * @attention
10 | *
11 | * © Copyright (c) 2018 STMicroelectronics.
12 | * All rights reserved.
13 | *
14 | * This software component is licensed by ST under Ultimate Liberty license
15 | * SLA0044, the "License"; You may not use this file except in compliance with
16 | * the License. You may obtain a copy of the License at:
17 | * www.st.com/SLA0044
18 | *
19 | ******************************************************************************
20 | */
21 |
22 | #ifndef __AI_CORE_DATATYPES_H_
23 | #define __AI_CORE_DATATYPES_H_
24 | #pragma once
25 | #include <stdint.h>
26 |
27 | /*!
28 | * @defgroup Core Module Datatypes
29 | * @brief Data structures and defines used by core module
30 | */
31 |
32 | /*!
33 | * @brief platform runtime core library version
34 | */
35 | #define AI_PLATFORM_RUNTIME_MAJOR 3
36 | #define AI_PLATFORM_RUNTIME_MINOR 3
37 | #define AI_PLATFORM_RUNTIME_MICRO 0
38 |
39 | #define AI_MAGIC_CONTEXT_TOKEN (0xA1C00100) /*!< AI Cool! Magic Token */
40 |
41 | #define AI_MAGIC_INSPECTOR_TOKEN (0xA1C00101) /*!< AI Cool! Magic Token */
42 |
43 |
44 | #define AI_ID_OBJ(id) ((ai_id_obj)(id))
45 |
46 |
47 | /*!
48 | * @typedef ai_id_obj
49 | * @ingroup core_datatypes
50 | * @brief numeric identifier for generic object instances (e.g. layers,
51 |  * operators, etc.). It is used by the codegen tool to keep track of the
52 |  * specific instances created
53 | */
54 | typedef uint16_t ai_id_obj;
55 |
56 | #endif /*__AI_CORE_DATATYPES_H_*/
57 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/core_utils.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file core_utils.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 16-Aug-2018
6 | * @brief header file of core utils routines
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 |
21 | #ifndef __CORE_UTILS_H_
22 | #define __CORE_UTILS_H_
23 | #pragma once
24 |
25 | #include "ai_platform.h"
26 | #include "ai_platform_interface.h"
27 |
28 | #include "core_common.h"
29 |
30 | AI_API_DECLARE_BEGIN
31 |
32 | /*!
33 | * @defgroup core_utils Core Utils Routines
34 |  * @brief Implementation of core utils such as checksum algorithms, etc.
35 | */
36 |
37 |
38 | /*!
39 | * @brief Computes 32bit checksum from a buffer array of bytes
40 | * @ingroup core_utils
41 |  * @param[in] buffer an opaque handle to the buffer over which to compute the CRC
42 |  * @param[in] byte_size the size in bytes of the buffer provided
43 | */
44 | AI_INTERNAL_API
45 | ai_u32 core_utils_compute_crc32(
46 | const ai_handle buffer, const ai_size byte_size);
47 |
48 | /*!
49 | * @brief Computes network signature given a network context
50 | * @ingroup core_utils
51 | * @param[in] net_ctx a pointer to the network context to be signed
52 | * @param[out] signature a pointer to the checksum signature computed
53 | * from the network context
54 | * @return false if failed to compute the signature, true otherwise
55 | */
56 | AI_INTERNAL_API
57 | ai_bool core_utils_generate_network_signature(
58 | const ai_network* net_ctx, ai_signature* signature);
59 |
60 | AI_API_DECLARE_END
61 |
62 | #endif /*__CORE_UTILS_H_*/
63 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/datatypes_network.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file datatypes_network.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 30-Aug-2017
6 | * @brief Definitions of code generated network types
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2017 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 |
21 | #ifndef __DATATYPES_NETWORK_H__
22 | #define __DATATYPES_NETWORK_H__
23 | #pragma once
24 |
25 | /*
26 |  * Header to be overridden by the generated version.
27 |  * When included with <>, the include directories are searched in the order
28 |  * specified to the compiler.
29 |  * To enable the override, put the generated path before the API path.
30 | */
31 |
32 | #include "ai_platform.h"
33 |
34 | AI_API_DECLARE_BEGIN
35 |
36 | #ifdef AI_OVERRIDE_CUSTOM_TYPES
37 | #warning "Warning: Custom Types have been already defined!\n"
38 | #endif
39 |
40 | #define AI_CUSTOM_TYPES_COUNT (3)
41 |
42 | #define AI_CUSTOM_TYPES_SIGNATURE_DECLARE(name) \
43 | const ai_custom_type_signature name[AI_CUSTOM_TYPES_COUNT+1] = { \
44 | AI_CUSTOM_TYPES_COUNT, \
45 | AI_CUSTOM_SIZE(ai_shape_dimension), \
46 | AI_CUSTOM_SIZE(ai_stride_dimension), \
47 | AI_CUSTOM_SIZE(ai_array_size), \
48 | };
49 |
50 |
51 | typedef ai_u32 ai_shape_dimension;
52 | typedef ai_i32 ai_stride_dimension;
53 | typedef ai_u32 ai_array_size;
54 |
55 |
56 | AI_API_DECLARE_END
57 |
58 | #endif /*__DATATYPES_NETWORK_H__*/
59 |
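Editor's note: the comment above describes the override-by-include-order mechanism, and AI_CUSTOM_TYPES_SIGNATURE_DECLARE emits a small signature array (the custom-type count followed by the AI_CUSTOM_SIZE of each of the three custom types). A hedged usage sketch follows; the variable name is invented for illustration and the snippet assumes the AI include directory is on the include path.

    #include "datatypes_network.h"

    /* Declares: const ai_custom_type_signature my_net_custom_types[4] = { 3, ... };
     * i.e. the count first, then the AI_CUSTOM_SIZE of ai_shape_dimension,
     * ai_stride_dimension and ai_array_size, per the macro definition above. */
    AI_CUSTOM_TYPES_SIGNATURE_DECLARE(my_net_custom_types)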
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/formats_list.h:
--------------------------------------------------------------------------------
1 |
2 | /* FMT_ENTRY( idx_, exp_(0/1 only), exp_name_, exp_id_, name_, type_id_,
3 | * sign_bit_, float_bit_, pbits_, bits_, fbits_, ldiv_bits_)
4 | * Specifications:
5 | - idx_ (8bits) : an index that increases from 0 to N, where N is the total
6 | number of formats
7 | - exp_ (1bit) : a boolean flag (0 or 1) indicating whether the format is
8 | available as a public API ai_buffer format; in this case the field
9 | exp_name_ indicates the enum name of the ai_buffer format
10 | - exp_id_ (8 bits) : this field can be any number in the range [0x0..0xff]
11 |
12 | - name_ : it is the enum used to define the ai_data_format.
13 | - type_id_ (4bits) : it is used to define the "family" of the format:
14 | see @ref AI_FMT_I as an example. Currently supported types are:
15 | AI_FMT_I (integer types), AI_FMT_Q (fixed point types), AI_FMT_FLOAT
16 | (floating point values), AI_FMT_LUT4 or AI_FMT_LUT8 (compressed formats)
17 | - sign_bit_ (1bit) : codes whether or not the format is of a signed type
18 | - float_bit_ (1bit) : codes if the format is float
19 | - pbits_ (4bits) : number of padding bits for the format
20 | - bits_ (7bits) : size in bits of the format (NB: integer+fractional bits)
21 | - fbits_ (7bits) : number of fractional bits for the format (for AI_FMT_Q only)
22 | - ldiv_bits (4 bits) : right shift value for computing the byte size of the format
23 |
24 | */
25 |
26 | /* Macro tricks are here:
27 | * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
28 | */
29 |
30 | /* Format none entry */
31 | FMT_ENTRY( 0, 1, AI_BUFFER_FORMAT_NONE, 0x00, NONE, AI_FMT_I, 0, 0, 0, 0, 0, 0)
32 |
33 | /* Floating point formats */
34 | FMT_ENTRY( 1, 1, AI_BUFFER_FORMAT_FLOAT, 0x01, FLOAT, AI_FMT_FLOAT, 1, 1, 0, 32, 0, 0)
35 | FMT_ENTRY( 2, 0, AI_BUFFER_FORMAT_FLOAT64, 0x02, FLOAT64, AI_FMT_FLOAT, 1, 1, 0, 64, 0, 0)
36 |
37 | /* Integer formats */
38 | FMT_ENTRY( 3, 1, AI_BUFFER_FORMAT_U8, 0x10, U8, AI_FMT_I, 0, 0, 0, 8, 0, 0)
39 | FMT_ENTRY( 4, 0, AI_BUFFER_FORMAT_U16, 0x11, U16, AI_FMT_I, 0, 0, 0, 16, 0, 0)
40 | FMT_ENTRY( 5, 0, AI_BUFFER_FORMAT_U32, 0x12, U32, AI_FMT_I, 0, 0, 0, 32, 0, 0)
41 | FMT_ENTRY( 6, 0, AI_BUFFER_FORMAT_U64, 0x13, U64, AI_FMT_I, 0, 0, 0, 64, 0, 0)
42 | FMT_ENTRY( 7, 0, AI_BUFFER_FORMAT_U4, 0x14, U4, AI_FMT_I, 0, 0, 0, 4, 0, 0)
43 | FMT_ENTRY( 8, 0, AI_BUFFER_FORMAT_S8, 0x20, S8, AI_FMT_I, 1, 0, 0, 8, 0, 0)
44 | FMT_ENTRY( 9, 0, AI_BUFFER_FORMAT_S16, 0x21, S16, AI_FMT_I, 1, 0, 0, 16, 0, 0)
45 | FMT_ENTRY(10, 0, AI_BUFFER_FORMAT_S32, 0x22, S32, AI_FMT_I, 1, 0, 0, 32, 0, 0)
46 | FMT_ENTRY(11, 0, AI_BUFFER_FORMAT_S64, 0x23, S64, AI_FMT_I, 1, 0, 0, 64, 0, 0)
47 |
48 | /* Fixed-point formats including ARM CMSIS Q7, Q15, Q31 ones */
49 | FMT_ENTRY(12, 0, AI_BUFFER_FORMAT_UQ, 0x30, UQ, AI_FMT_Q, 0, 0, 0, 0, 0, 0)
50 | FMT_ENTRY(13, 1, AI_BUFFER_FORMAT_Q7, 0x31, Q7, AI_FMT_Q, 1, 0, 0, 8, 7, 0)
51 | FMT_ENTRY(14, 1, AI_BUFFER_FORMAT_Q15, 0x32, Q15, AI_FMT_Q, 1, 0, 0, 16, 15, 0)
52 | FMT_ENTRY(15, 0, AI_BUFFER_FORMAT_Q31, 0x33, Q31, AI_FMT_Q, 1, 0, 0, 32, 31, 0)
53 |
54 | /* Compressed formats */
55 | FMT_ENTRY(16, 0, AI_BUFFER_FORMAT_LUT4_FLOAT, 0x50, LUT4_FLOAT, AI_FMT_LUT4, 1, 1, 0, 32, 0, 3)
56 | FMT_ENTRY(17, 0, AI_BUFFER_FORMAT_LUT8_FLOAT, 0x51, LUT8_FLOAT, AI_FMT_LUT8, 1, 1, 0, 32, 0, 2)
57 | FMT_ENTRY(18, 0, AI_BUFFER_FORMAT_LUT4_Q15, 0x52, LUT4_Q15, AI_FMT_LUT4, 1, 0, 0, 16, 15, 2)
58 | FMT_ENTRY(19, 0, AI_BUFFER_FORMAT_LUT8_Q15, 0x53, LUT8_Q15, AI_FMT_LUT8, 1, 0, 0, 16, 15, 1)
59 |
60 | #undef FMT_ENTRY
61 |
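Editor's note: this file is an X-macro list (note the preprocessor-tricks link above and the trailing #undef FMT_ENTRY): a consumer defines FMT_ENTRY, includes the file, and each FMT_ENTRY line expands in place. A hedged sketch of such a consumer follows; the enum name and the FMT_IDX_ prefix are invented for illustration and are not part of the ST sources.

    /* Build an enum of format indices, one enumerator per FMT_ENTRY line. */
    #define FMT_ENTRY(idx_, exp_, exp_name_, exp_id_, name_, type_id_, \
                      sign_bit_, float_bit_, pbits_, bits_, fbits_, ldiv_bits_) \
        FMT_IDX_ ## name_ = (idx_),

    enum fmt_index {
    #include "formats_list.h"   /* FMT_IDX_NONE = 0, FMT_IDX_FLOAT = 1, ... */
    };
    /* formats_list.h #undef's FMT_ENTRY itself, so no cleanup is needed here. */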
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/layers.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file layers.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 01-May-2017
6 | * @brief header file of AI platform layers datatypes
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2017 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 |
21 | #ifndef __LAYERS_H_
22 | #define __LAYERS_H_
23 | #pragma once
24 |
25 | #include "layers_common.h"
26 | #include "layers_conv2d.h"
27 | #include "layers_generic.h"
28 | #include "layers_nl.h"
29 | #include "layers_norm.h"
30 | #include "layers_pool.h"
31 | #include "layers_rnn.h"
32 |
33 | #ifdef USE_OPERATORS
34 | #include "layers_lambda.h"
35 | #endif /* USE_OPERATORS */
36 |
37 |
38 | AI_API_DECLARE_BEGIN
39 |
40 | /*!
41 | * @defgroup layers Layers
42 | * @brief Definition of the forward functions for the layers and the general
43 | * ai_layer datastructure used to abstract specific layer implementation in the
44 | * generic forward function definition
45 | *
46 | * The forward function for a layer computes the layer activations given the
47 | * activations of the previous layer. They are added to the layer as function
48 | * pointer and called implicitly by the @ref ai_layers_forward_all function.
49 |  * The input activations are read from layer->in and the computed
50 |  * activations stored in layer->out. The layer type needs to be compatible
51 | * with the forward function, but layers with the same layout (e.g. `mp` and
52 | * `ap`) can share the same structure.
53 | */
54 |
55 | /******************************************************************************/
56 | /* Forward Functions Section */
57 | /******************************************************************************/
58 |
59 | /*!
60 | * @brief Executes a single layer in the network.
61 | * @ingroup layers
62 | * @param layer the layer to process
63 | * @return pointer to the next layer
64 | */
65 | AI_INTERNAL_API
66 | ai_layer* ai_layers_forward_layer(ai_layer* layer);
67 |
68 |
69 | /*!
70 |  * @brief Computes the output of the network given the input.
71 | * @ingroup layers
72 | *
73 |  * Given a network with the input pre-loaded in the net->in tensor,
74 |  * computes the output by calling the forward functions of each layer and
75 |  * selecting the next layer. When the layer has no successor or it's in a
76 |  * loop-back configuration (layer->next is again layer), the function
77 |  * stops. The result is stored in net->out.
78 | *
79 | * @param net the network to evaluate
80 | */
81 | AI_INTERNAL_API
82 | void ai_layers_forward_all(ai_network* net);
83 |
84 | AI_API_DECLARE_END
85 |
86 | #endif /* __LAYERS_H_ */
87 |
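Editor's note: the group comment above describes how ai_layers_forward_all walks the layer chain by repeatedly running each layer's forward function until there is no successor or the layer loops back on itself. The sketch below only illustrates that traversal as described; it is not the ST implementation, and it uses just ai_layers_forward_layer as declared above.

    /* Illustrative only: iterate forward functions until the chain ends. */
    static void forward_all_sketch(ai_layer* first)
    {
        ai_layer* layer = first;
        while (layer) {
            /* runs the layer's forward function and returns the next layer */
            ai_layer* next = ai_layers_forward_layer(layer);
            if (next == NULL || next == layer)   /* no successor, or loop-back */
                break;
            layer = next;
        }
    }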
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/layers_lambda.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file layers_lambda.h
4 | * @author AST Embedded Analytics Research Platform
5 |  * @date 30-Jul-2018
6 | * @brief header file of AI platform lambda layers datatype
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 | #ifndef __LAYERS_LAMBDA_H_
21 | #define __LAYERS_LAMBDA_H_
22 | #pragma once
23 |
24 | #include "layers_common.h"
25 |
26 | #ifdef USE_OPERATORS
27 |
28 | #include "operators_common.h"
29 |
30 | /*!
31 | * @defgroup layers_lambda Lambda layer definitions
32 | * @brief Definition of structures for Lambda layers and generic operator
33 | * containers. These layers require the inclusion of the operator module.
34 | */
35 |
36 | AI_API_DECLARE_BEGIN
37 |
38 |
39 | /*!
40 | * @struct ai_layer_lambda
41 | * @ingroup layers_lambda
42 | * @brief Lambda layer wrapper
43 | *
44 | * The lambda layer wrapper includes a sub-graph of operators.
45 | */
46 | typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lambda_ {
47 | AI_LAYER_COMMON_FIELDS_DECLARE
48 | ai_operator* sub_graph;
49 | } ai_layer_lambda;
50 |
51 | /*!
52 | * @struct ai_layer_container
53 | * @ingroup layers_lambda
54 | * @brief Container layer
55 | *
56 |  * The container layer wraps generic operators in order to use them as layers
57 | * in a network structure.
58 | */
59 | typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_container_ {
60 | AI_LAYER_COMMON_FIELDS_DECLARE
61 | struct ai_operator_* operators;
62 | } ai_layer_container;
63 |
64 |
65 | /******************************************************************************/
66 | /* Forward Functions Section */
67 | /******************************************************************************/
68 |
69 | /*!
70 |  * @brief Computes the activations of a container layer.
71 | * @ingroup layers_generic
72 | *
73 | * Container forward layer function. This forward function
74 | * implements the activation of the operators chain.
75 | * @param layer the container layer
76 | */
77 | AI_INTERNAL_API
78 | void forward_container(ai_layer* layer);
79 |
80 | /*!
81 | * @brief Computes the activations of a lambda layer.
82 | * @ingroup layers_lambda
83 | * @param layer the lambda layer
84 | */
85 | AI_INTERNAL_API
86 | void forward_lambda(ai_layer* layer);
87 |
88 | AI_API_DECLARE_END
89 |
90 | #endif /* USE_OPERATORS */
91 |
92 | #endif /*__LAYERS_LAMBDA_H_*/
93 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/layers_list.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file layers_list.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 20-Jul-2018
6 | * @brief header file of AI platform layers datatypes
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 |
21 |
22 | /* No sentry. This is deliberate!! */
23 | /* Template: LAYER_ENTRY(type_, id_, struct_, forward_func_)
24 | * Where:
25 |  * - type_ is the (enum) type name of the layer. To have the complete enum
26 | * value you should use the macro @ref AI_LAYER_TYPE_ENTRY(type_) that adds
27 | * the specific prefix and postfix tokens to the type_
28 | * - id_ is the numeric id of the layer
29 | * - struct_ is the name of the datastruct of the layer
30 | * - forward_func_ is the forward function name of the routine implementing
31 | * actual layer processing
32 | */
33 |
34 | /*!< Elementwise addition layer */
35 | LAYER_ENTRY(ADD, 10001, ai_layer_add, forward_add)
36 | /*!< Batch normalization layer */
37 | LAYER_ENTRY(BN, 10002, ai_layer_bn, forward_bn)
38 | /*!< 2D Convolutional layer */
39 | LAYER_ENTRY(CONV2D, 10004, ai_layer_conv2d, forward_conv2d)
40 | /*!< Dense layer */
41 | LAYER_ENTRY(DENSE, 10005, ai_layer_dense, forward_dense)
42 | /*!< Gated Recurrent Unit layer */
43 | LAYER_ENTRY(GRU, 10006, ai_layer_gru, forward_gru)
44 | /*!< Local Response Normalization layer */
45 | LAYER_ENTRY(LRN, 10007, ai_layer_lrn, forward_lrn)
46 | /*!< Long Short-Term Memory layer */
47 | LAYER_ENTRY(LSTM, 10008, ai_layer_lstm, forward_lstm)
48 | /*!< Nonlinearity layer */
49 | LAYER_ENTRY(NL, 10009, ai_layer_nl, forward_nl)
50 | /*!< Normalization layer */
51 | LAYER_ENTRY(NORM, 10010, ai_layer_norm, forward_norm)
52 | /*!< Merged Conv2d / Pool layer */
53 | LAYER_ENTRY(OPTIMIZED_CONV2D, 10011, ai_layer_conv2d_nl_pool, forward__conv2d_nl_pool)
54 | /*!< Permute Tensor layer */
55 | LAYER_ENTRY(PERMUTE, 10012, ai_layer_permute, forward_permute)
56 | /*!< Pooling layer */
57 | LAYER_ENTRY(POOL, 10013, ai_layer_pool, forward_pool)
58 | /*!< Softmax layer */
59 | LAYER_ENTRY(SM, 10014, ai_layer_sm, forward_sm)
60 | /*!< Slice layer */
61 | LAYER_ENTRY(SPLIT, 10015, ai_layer_split, forward_split)
62 | /*!< TimeDelay layer */
63 | LAYER_ENTRY(TIME_DELAY, 10016, ai_layer_time_delay, forward_time_delay)
64 | /*!< TimeDistributed layer */
65 | LAYER_ENTRY(TIME_DISTRIBUTED, 10017, ai_layer_time_distributed, forward_time_distributed)
66 | /*!< Concat Tensor layer */
67 | LAYER_ENTRY(CONCAT, 10019, ai_layer_concat, forward_concat)
68 | #ifdef USE_OPERATORS
69 | /*!< Container layer for operators */
70 | LAYER_ENTRY(CONTAINER, 10003, ai_layer_container, forward_container)
71 | /*!< Lambda layer for operators */
72 | LAYER_ENTRY(LAMBDA, 10018, ai_layer_lambda, forward_lambda)
73 | #endif
74 | #undef LAYER_ENTRY
75 |
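Editor's note: like formats_list.h, this is an X-macro list with no include guard ("No sentry" is deliberate). A hedged consumer sketch that turns the entries into an id/name lookup table is shown below; the struct and table names are invented for illustration and are not part of the ST sources.

    /* One table row per LAYER_ENTRY(type_, id_, struct_, forward_func_). */
    typedef struct { int id; const char* name; } layer_desc;

    #define LAYER_ENTRY(type_, id_, struct_, forward_func_) { (id_), #type_ },
    static const layer_desc layer_table[] = {
    #include "layers_list.h"    /* ADD, BN, CONV2D, ... (+CONTAINER/LAMBDA if USE_OPERATORS) */
    };

    #define LAYER_TABLE_SIZE (sizeof(layer_table) / sizeof(layer_table[0]))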
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/layers_override.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file layers_override.h
4 | * @author AST Embedded Analytics Research Platform
5 | * @date 18-Apr-2018
6 | * @brief header file of AI platform layers override defines
7 | ******************************************************************************
8 | * @attention
9 | *
10 | * © Copyright (c) 2018 STMicroelectronics.
11 | * All rights reserved.
12 | *
13 | * This software component is licensed by ST under Ultimate Liberty license
14 | * SLA0044, the "License"; You may not use this file except in compliance with
15 | * the License. You may obtain a copy of the License at:
16 | * www.st.com/SLA0044
17 | *
18 | ******************************************************************************
19 | */
20 | #ifndef __LAYERS_OVERRIDE_H_
21 | #define __LAYERS_OVERRIDE_H_
22 | #pragma once
23 | #include "ai_common_config.h"
24 |
25 | /*!
26 | * @defgroup layers Layers Override Definitions
27 | * @brief definition
28 | *
29 | */
30 |
31 | #ifdef HAS_STM32
32 | #endif
33 |
34 | #endif /*__LAYERS_OVERRIDE_H_*/
35 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/include/platform_config.h:
--------------------------------------------------------------------------------
1 | #ifndef __AI_PLATFORM_CONFIG_H__
2 | #define __AI_PLATFORM_CONFIG_H__
3 |
4 | #define AI_PLATFORM_RUNTIME_REVISION_ID "(rev-0460b0fafaa76f402b64e43edb97aa0604fd2d3d)"
5 |
6 | #endif /*__AI_PLATFORM_CONFIG_H__*/
7 |
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/AI/AI/lib/network_runtime.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/firmware/perftest/Middlewares/ST/AI/AI/lib/network_runtime.a
--------------------------------------------------------------------------------
/firmware/perftest/Middlewares/ST/Application/SystemPerformance/Inc/aiSystemPerformance.h:
--------------------------------------------------------------------------------
1 | /**
2 | ******************************************************************************
3 | * @file aiSystemPerformance.h
4 | * @author MCD Vertical Application Team
5 | * @brief Entry points for AI system performance application
6 | ******************************************************************************
7 | * @attention
8 | *
9 | * © Copyright (c) YYYY STMicroelectronics.
10 | * All rights reserved.
11 | *
12 | * This software component is licensed by ST under Ultimate Liberty license
13 | * SLA0044, the "License"; You may not use this file except in compliance with
14 | * the License. You may obtain a copy of the License at:
15 | * www.st.com/SLA0044
16 | *
17 | ******************************************************************************
18 | */
19 |
20 | #ifndef __AI_SYSTEM_PERFORMANCE_H__
21 | #define __AI_SYSTEM_PERFORMANCE_H__
22 |
23 | #include
24 |
25 | #ifdef __cplusplus
26 | extern "C" {
27 | #endif
28 |
29 | int aiSystemPerformanceInit(void);
30 | int aiSystemPerformanceProcess(void);
31 | void aiSystemPerformanceDeInit(void);
32 |
33 | #ifdef __cplusplus
34 | }
35 | #endif
36 |
37 | #endif /* __AI_SYSTEM_PERFORMANCE_H__ */
38 |
--------------------------------------------------------------------------------
/firmware/perftest/flash.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ######## Modify this Section:
4 | # 1) Set the Installation path for OpenOCD
5 | # example:
6 | #OpenOCD_DIR="C:/Ac6/SystemWorkbench/plugins/fr.ac6.mcu.externaltools.openocd.win32_1.21.0.201811131241/tools/openocd"
7 | OpenOCD_DIR=""
8 |
9 | # 2) Set the installation path for the stm32 OpenOCD scripts
10 | # example:
11 | #OpenOCD_CFC="C:/Ac6/SystemWorkbench/plugins/fr.ac6.mcu.debug_2.3.1.201811131241/resources/openocd/scripts"
12 | OpenOCD_CFC=""
13 |
14 | # 3) Only for Linux/macOS: add the openocd library path to _LIBRARY_PATH:
15 | # For macOS example:
16 | #export DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:${OpenOCD_DIR}"lib/"
17 |
18 | # For Linux example:
19 | #export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${OpenOCD_DIR}"lib/"
20 |
21 | ######## Don't change the following part
22 |
23 | ## Control Section
24 |
25 | if [[ ! $OpenOCD_DIR ]]; then
26 |     echo "Please add the right path to the OpenOCD_DIR variable"
27 | exit
28 | fi
29 |
30 | if [[ ! $OpenOCD_CFC ]]; then
31 |     echo "Please add the right path to the OpenOCD_CFC variable"
32 | exit
33 | fi
34 |
35 |
36 | ## Run section
37 |
38 | # Board type
39 | BOARDNAME="nucleo_l476rg"
40 |
41 | # OpenOCD command
42 | OpenOCD_CMD="${OpenOCD_DIR}/bin/openocd -s ${OpenOCD_CFC} -f st_board/${BOARDNAME}.cfg"
43 |
44 |
45 | echo "/******************************************/"
46 | echo " Clean FP-AI-SENSING1"
47 | echo "/******************************************/"
48 | echo " Full Chip Erase"
49 | echo "/******************************************/"
50 | ${OpenOCD_CMD} -c "init" -c "reset halt" -c "flash erase_sector 0 511 511" -c "shutdown"
51 | echo "/******************************************/"
52 | echo " Install BootLoader"
53 | echo "/******************************************/"
54 | ${OpenOCD_CMD} -c "init" -c "reset halt" -c "flash write_image erase ../../../../../../Utilities/BootLoader/STM32L476RG/BootLoaderL4.bin 0x08000000 bin" -c "shutdown"
55 | echo "/******************************************/"
56 | echo " Install FP-AI-SENSING1"
57 | echo "/******************************************/"
58 | ${OpenOCD_CMD} -c "init" -c "reset halt" -c "flash write_image erase ./SENSING1.bin 0x08004000 bin" -c "shutdown"
59 | echo "/******************************************/"
60 | echo " Dump FP-AI-SENSING1 + BootLoader"
61 | echo "/******************************************/"
62 |
63 | SizeBinBL=`ls -l ./SENSING1.bin | awk '{print $6+0x4000};'`
64 | ${OpenOCD_CMD} -c "init" \
65 | -c "reset halt" \
66 | -c "dump_image ./SENSING1_BL.bin 0x08000000 ${SizeBinBL}" \
67 | -c "shutdown"
68 |
69 |
--------------------------------------------------------------------------------
/firmware/perftest/readperf.py:
--------------------------------------------------------------------------------
1 |
2 | import re
3 | import json
4 |
5 | import serial
6 |
7 | def read_report(ser):
8 |
9 | lines = []
10 | state = 'wait-for-start'
11 |
12 | while state != 'ended':
13 | raw = ser.readline()
14 | line = raw.decode('utf-8').strip()
15 |
16 | if state == 'wait-for-start':
17 | if line.startswith('Results for'):
18 | state = 'started'
19 |
20 | if state == 'started':
21 | lines.append(line)
22 | if line.endswith('cfg=0'):
23 | state = 'ended'
24 |
25 | return '\n'.join(lines)
26 |
27 |
28 | example_report = """
29 | Results for "network", 16 inferences @80MHz/80MHz (complexity: 2980798 MACC)
30 | duration : 325.142 ms (average)
31 | CPU cycles : 26011387 -156/+90 (average,-/+)
32 | CPU Workload : 32%
33 | cycles/MACC : 8.72 (average for all layers)
34 | used stack : 276 bytes
35 | used heap : 0:0 0:0 (req:allocated,req:released) cfg=0
36 | """
37 |
38 | def parse_report(report):
39 | out = {}
40 |
41 | result_regexp = r'@(\d*)MHz\/(\d*)MHz.*complexity:\s(\d*)\sMACC'
42 | matches = list(re.finditer(result_regexp, report, re.MULTILINE))
43 | cpu_freq, cpu_freq_max, macc = matches[0].groups()
44 | out['cpu_mhz'] = int(cpu_freq)
45 | out['macc'] = int(macc)
46 |
47 | key_value_regex = r'(.*)\s:\s(.*)'
48 | matches = re.finditer(key_value_regex, report, re.MULTILINE)
49 | for matchNum, match in enumerate(matches, start=1):
50 | key, value = match.groups()
51 | key = key.strip()
52 | value = value.strip()
53 | if key == 'used stack':
54 | out['stack'] = int(value.rstrip(' bytes'))
55 | if key == 'duration':
56 | out['duration_avg'] = float(value.rstrip(' ms (average)'))/1000
57 | if key == 'CPU cycles':
58 | out['cycles_avg'] = int(value.split()[0])
59 |
60 | out['cycles_macc'] = out['cycles_avg'] / out['macc']
61 | return out
62 |
63 |
64 | def test_parse_report():
65 | out = parse_report(example_report)
66 |
67 | assert out['duration_avg'] == 0.325142
68 | assert out['cycles_avg'] == 26011387
69 | assert out['stack'] == 276
70 | assert out['cpu_mhz'] == 80
71 | assert out['macc'] == 2980798
72 |
73 |
74 | def main():
75 | test_parse_report()
76 |
77 | device = '/dev/ttyACM0'
78 | baudrate = 115200
79 |
80 | with serial.Serial(device, baudrate, timeout=0.5) as ser:
81 |
82 | # avoid reading stale data
83 | thrash = ser.read(10000)
84 |
85 | report = read_report(ser)
86 | out = parse_report(report)
87 | print(json.dumps(out))
88 |
89 |
90 | if __name__ == '__main__':
91 | main()
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
/jobs.py:
--------------------------------------------------------------------------------
1 |
2 | from microesc import jobs
3 | jobs.main()
4 |
--------------------------------------------------------------------------------
/microesc/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/microesc/common.py:
--------------------------------------------------------------------------------
1 |
2 | import os.path
3 |
4 | import yaml
5 |
6 | def ensure_dir(directory):
7 | if not os.path.exists(directory):
8 | os.makedirs(directory)
9 |
10 |
11 | def ensure_dir_for_file(path):
12 | directory = os.path.dirname(path)
13 | ensure_dir(directory)
14 |
15 | def ensure_directories(*dirs):
16 | for dir in dirs:
17 | ensure_dir(dir)
18 |
19 |
20 | def add_arguments(parser):
21 | a = parser.add_argument
22 |
23 | a('--datasets', dest='datasets_dir', default='./data/datasets',
24 | help='%(default)s')
25 | a('--features', dest='features_dir', default='./data/features',
26 | help='%(default)s')
27 | a('--models', dest='models_dir', default='./data/models',
28 | help='%(default)s')
29 |
30 | a('--settings', dest='settings_path', default='./experiments/ldcnn20k60.yaml',
31 | help='%(default)s')
32 |
33 | def load_settings_path(path):
34 | with open(path, 'r') as config_file:
35 |         settings = yaml.safe_load(config_file.read())
36 |
37 | return settings
38 |
--------------------------------------------------------------------------------
/microesc/models/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from . import sbcnn, piczakcnn
3 | from . import strided
4 | from . import ldcnn, dcnn
5 | from . import mobilenet, effnet, dense, squeezenet
6 | from . import dmix, dilated
7 | from . import skm, speech
8 |
9 | families = {
10 | 'piczakcnn': piczakcnn.build_model,
11 | 'sbcnn': sbcnn.build_model,
12 | 'ldcnn': ldcnn.ldcnn_nodelta,
13 | 'dcnn': dcnn.dcnn_nodelta,
14 | 'mobilenet': mobilenet.build_model,
15 | 'effnet': effnet.build_model,
16 | 'skm': skm.build_model,
17 | 'strided': strided.build_model,
18 | 'squeezenet': squeezenet.build_model,
19 | }
20 |
21 | def build(settings):
22 |
23 | builder = families.get(settings['model'])
24 |
25 | options = dict(
26 | frames=settings['frames'],
27 | bands=settings['n_mels'],
28 | channels=settings.get('channels', 1),
29 | )
30 |
31 | known_settings = [
32 | 'conv_size',
33 | 'conv_block',
34 | 'downsample_size',
35 | 'n_stages',
36 | 'dropout',
37 | 'fully_connected',
38 | 'n_blocks_per_stage',
39 | 'filters',
40 | ]
41 | for k in known_settings:
42 | v = settings.get(k, None)
43 | options[k] = v
44 |
45 | model = builder(**options)
46 | return model
47 |
48 |
--------------------------------------------------------------------------------
/microesc/models/crnn.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | """
4 |
5 | Inspired by CRNN model described in
6 |
7 | Sound Event Detection: A Tutorial
8 | https://arxiv.org/abs/2107.05463
9 |
10 | and
11 |
12 | Convolutional Recurrent Neural Networks for Polyphonic Sound Event Detection
13 | https://arxiv.org/abs/1702.06286
14 | """
15 |
16 | # related code, https://chadrick-kwag.net/tf-keras-rnn-ctc-example/
17 |
18 | def build_model(frames=128, bands=40, channels=1, n_classes=10,
19 | conv_size=(3,3),
20 | conv_block='conv',
21 | downsample_size=(2,2),
22 | n_stages=3, n_blocks_per_stage=1,
23 | filters=128, kernels_growth=1.0,
24 | fully_connected=64,
25 | rnn_units=32,
26 | temporal='bigru',
27 | dropout=0.5, l2=0.001, backend='detection'):
28 |
29 |
30 | from tensorflow.keras import Model, Sequential
31 | from tensorflow.keras.layers import \
32 | Conv2D, LSTM, GRU, Bidirectional, MaxPooling2D, \
33 | Reshape, TimeDistributed, Softmax, Dense, SeparableConv2D
34 |
35 | model = Sequential()
36 |
37 | input_shape = (frames, bands, channels)
38 |
39 | def add_conv_block(model, downsample_size, conv_filters=filters, kernel_size=conv_size,
40 | **kwargs):
41 |         model.add(SeparableConv2D(conv_filters, kernel_size, **kwargs))
42 | model.add(MaxPooling2D(downsample_size))
43 |
44 | # TODO: add ReLu
45 | # TODO: BatchNorm etc?
46 |
47 | # Convolutional layers
48 | add_conv_block(model, downsample_size=(1,5), input_shape=input_shape)
49 | add_conv_block(model, downsample_size=(1,2))
50 | add_conv_block(model, downsample_size=(1,2))
51 |
52 | # Temporal processing
53 | if temporal == 'bigru':
54 | o = model.layers[-1].output_shape
55 | model.add(Reshape((o[1], -1)))
56 | model.add(Bidirectional(GRU(rnn_units, return_sequences=True)))
57 | model.add(Bidirectional(GRU(rnn_units, return_sequences=True)))
58 | elif temporal == 'tcn':
59 | # TODO: make downsampling adjustable
60 | model.add(SeparableConv2D(rnn_units, (9, 1), strides=(2,1)))
61 | model.add(SeparableConv2D(rnn_units, (9, 1), strides=(2,1)))
62 | else:
63 | raise ValueError(f"Unknown temporal parameter {temporal}")
64 |
65 | # Output
66 | # TODO: support multiple layers
67 | # TODO: add Dropout
68 | o = model.layers[-1].output_shape
69 | if backend == 'classification':
70 | model.add(TimeDistributed(Dense(fully_connected, activation="linear")))
71 |         model.add(Dense(n_classes))
72 | model.add(Softmax())
73 |
74 | elif backend == 'detection':
75 | #model.add(TimeDistributed(Dense(fully_connected, activation="linear")))
76 | model.add(TimeDistributed(Dense(n_classes, activation="linear"), input_shape=(o[1], o[2])))
77 | model.add(Softmax())
78 | elif not backend:
79 | pass # no backend
80 | else:
81 | raise ValueError(f"Unsupported backend '{backend}'")
82 |
83 | return model
84 |
85 |
86 | def test_model():
87 |
88 | model = build_model(filters=24, bands=64, rnn_units=16, n_classes=3, temporal='tcn')
89 |
90 | print(model.summary())
91 |
92 |
93 | if __name__ == '__main__':
94 | test_model()
95 |
96 |
--------------------------------------------------------------------------------
/microesc/models/dcnn.py:
--------------------------------------------------------------------------------
1 |
2 | def dcnn_head(input, head_name, filters=80, kernel=(3,3)):
3 | def n(base):
4 | return base+'_'+head_name
5 |
6 | from keras.layers import Convolution2D, Flatten, MaxPooling2D
7 |
8 | x = input
9 | x = Convolution2D(filters, kernel, dilation_rate=(1,1), name=n('DilaConv1'))(x)
10 | x = MaxPooling2D(pool_size=(4,3), name=n('MPL1'))(x)
11 | x = Convolution2D(filters, kernel, dilation_rate=(2,2), name=n('DilaConv2'))(x)
12 | x = MaxPooling2D(pool_size=(1,3), name=n('MPL2'))(x)
13 |
14 | x = Flatten(name=n('flatten'))(x)
15 | return x
16 |
17 | def dcnn(bands=60, frames=31, n_classes=10, fully_connected=5000, filters=80, activation='relu'):
18 | """
19 | Dilated Convolution Neural Network with LeakyReLU for Environmental Sound Classification
20 |
21 | https://ieeexplore.ieee.org/document/8096153
22 | """
23 | # XXX: kernel size is missing from paper
24 |
25 | from keras.models import Sequential, Model
26 | from keras.layers import Dense, Dropout, Activation, Input, Concatenate
27 | import keras.layers
28 |
29 | input_shape = (bands, frames, 1)
30 |
31 | def head(input, name):
32 | return dcnn_head(input, name, filters)
33 |
34 | mel_input = Input(shape=input_shape, name='mel_input')
35 | delta_input = Input(shape=input_shape, name='delta_input')
36 | heads = [
37 | head(mel_input, 'mel'),
38 | head(delta_input, 'delta')
39 | ]
40 | m = keras.layers.concatenate(heads)
41 | m = Dense(fully_connected, activation=activation)(m)
42 | m = Dense(fully_connected, activation=activation)(m)
43 | m = Dense(n_classes, activation='softmax')(m)
44 |
45 | model = Model([mel_input, delta_input], m)
46 |
47 | return model
48 |
49 |
50 | def dcnn_nodelta(bands=60, frames=31, n_classes=10, channels=1,
51 | fully_connected=5000, filters=80, activation='relu'):
52 |
53 | from keras.models import Sequential, Model
54 | from keras.layers import Dense, Dropout, Activation, Input, Concatenate
55 | import keras.layers
56 |
57 | input_shape = (bands, frames, channels)
58 | def head(input, name):
59 | return dcnn_head(input, name, filters)
60 |
61 | mel_input = Input(shape=input_shape, name='mel_input')
62 | m = head(mel_input, 'mel')
63 | m = Dense(fully_connected, activation=activation)(m)
64 | m = Dense(fully_connected, activation=activation)(m)
65 | m = Dense(n_classes, activation='softmax')(m)
66 |
67 | model = Model(mel_input, m)
68 | return model
69 |
70 |
71 | def main():
72 | m = dcnn()
73 | m.save('dcnn.hdf5')
74 | m.summary()
75 |
76 | m = dcnn_nodelta()
77 | m.save('dcnn.nodelta.hdf5')
78 | m.summary()
79 |
80 | if __name__ == '__main__':
81 | main()
82 |
--------------------------------------------------------------------------------
/microesc/models/dense.py:
--------------------------------------------------------------------------------
1 |
2 | def build_model(bands=60, frames=41, channels=1, n_labels=10,
3 | dropout=0.0, depth=7, block=2, growth=15, pooling='avg',
4 | bottleneck=False, reduction=0.0, subsample=True):
5 | """
6 | DenseNet
7 | """
8 |
9 | # https://github.com/keras-team/keras-contrib/blob/master/keras_contrib/applications/densenet.py
10 | from keras_contrib.applications import densenet
11 |
12 | input_shape = (bands, frames, channels)
13 |
14 | model = densenet.DenseNet(input_shape=input_shape, pooling=pooling,
15 | depth=depth, nb_dense_block=block, growth_rate=growth,
16 | bottleneck=bottleneck, reduction=reduction,
17 | subsample_initial_block=subsample,
18 | include_top=True, classes=n_labels, dropout_rate=dropout)
19 |
20 | return model
21 |
22 | def main():
23 | m = build_model()
24 | m.save('densenet.hdf5')
25 |
26 | m.summary()
27 |
28 | if __name__ == '__main__':
29 | main()
30 |
--------------------------------------------------------------------------------
/microesc/models/dilated.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def dilaconv(bands=64, frames=41, channels=2,
4 | dilation=(2,2), kernel=(3,3), n_labels=10, dropout=0.5,
5 | kernels=[32, 32, 64, 64]):
6 | """
7 | Environmental sound classification with dilated convolutions
8 |
9 | https://www.sciencedirect.com/science/article/pii/S0003682X18306121
10 | """
11 |
12 | from keras.models import Sequential
13 | from keras.layers import Dense, Dropout, Activation, Flatten
14 | from keras.layers import Convolution2D, GlobalAveragePooling2D
15 | from keras.regularizers import l2
16 |
17 | input_shape = (bands, frames, channels)
18 |
19 | # XXX: number of kernels in original paper is unknown
20 | conv = [
21 | Convolution2D(kernels[0], kernel, input_shape=input_shape, activation='relu')
22 | ]
23 | for k in kernels[1:]:
24 | c = Convolution2D(k, kernel, dilation_rate=dilation, activation='relu')
25 | conv.append(c)
26 |
27 | model = Sequential(conv + [
28 | GlobalAveragePooling2D(),
29 | Dropout(dropout),
30 | Dense(n_labels, activation='softmax'),
31 | ])
32 |
33 | return model
34 |
35 | def main():
36 |     # ldcnn and ldcnn_nodelta are defined in the sibling module microesc/models/ldcnn.py
37 |     from microesc.models.ldcnn import ldcnn, ldcnn_nodelta
38 |     m = dilaconv()
39 |     m.summary()
40 |     m.save('dilaconv.hdf5')
41 |     m = ldcnn()
42 |     m.save('ldcnn.hdf5')
43 |     m.summary()
44 | m = ldcnn_nodelta()
45 | m.save('ldcnn.nodelta.hdf5')
46 | m.summary()
47 |
48 | if __name__ == '__main__':
49 | main()
50 |
--------------------------------------------------------------------------------
/microesc/models/dmix.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def build_model(bands=128, frames=128, channels=2, n_classes=10,
4 | filters=80, L=57, W=6, fully_connected=5000):
5 |
6 | """
7 | Deep Convolutional Neural Network with Mixup for Environmental Sound Classification
8 |
9 | https://link.springer.com/chapter/10.1007/978-3-030-03335-4_31
10 | """
11 |
12 | from keras.models import Sequential, Model
13 | from keras.layers import Dense, Dropout, Activation, Input, Concatenate
14 | from keras.layers import Convolution2D, Flatten, MaxPooling2D
15 | import keras.layers
16 |
17 | input_shape = (bands, frames, channels)
18 |
19 | # FIXME: add missing BatchNormalization and Dropout and L2 regularization
20 | model = Sequential([
21 | Convolution2D(32, (3,7), padding='same', input_shape=input_shape),
22 | Convolution2D(32, (3,5), padding='same'),
23 | MaxPooling2D(pool_size=(4,3)),
24 | Convolution2D(64, (3,1), padding='same'),
25 | Convolution2D(64, (3,1), padding='same'),
26 | MaxPooling2D(pool_size=(4,1)),
27 | Convolution2D(128, (1,5), padding='same'),
28 | Convolution2D(128, (1,5), padding='same'),
29 | MaxPooling2D(pool_size=(1,3)),
30 | Convolution2D(256, (3,3), padding='same'),
31 | Convolution2D(256, (3,3), padding='same'),
32 |         MaxPooling2D(pool_size=(2,2)),
33 |         Flatten(), Dense(512, activation='relu'),  # Flatten before the fully-connected layers
34 | Dense(n_classes, activation='softmax')
35 | ])
36 |
37 | return model
38 |
39 |
40 | def main():
41 | m = build_model()
42 | m.summary()
43 | m.save('dmix.orig.hdf5')
44 |
45 |
46 | if __name__ == '__main__':
47 | main()
48 |
--------------------------------------------------------------------------------
/microesc/models/effnet.py:
--------------------------------------------------------------------------------
1 |
2 | """
3 |
4 | Based on https://github.com/arthurdouillard/keras-effnet/blob/master/effnet.py"""
5 |
6 | from keras.models import Model
7 | from keras.layers import *
8 | from keras.activations import *
9 |
10 | def get_post(x_in):
11 | #x = LeakyReLU()(x_in) # unsupported by STM32AI
12 | x = Activation('relu')(x_in)
13 | x = BatchNormalization()(x)
14 | return x
15 |
16 | def get_block(x_in, ch_in, ch_out, kernel=3, downsample=2, strides=(1,1)):
17 | x = Conv2D(ch_in,
18 | kernel_size=(1, 1),
19 | strides=strides,
20 | padding='same',
21 | use_bias=False)(x_in)
22 | x = get_post(x)
23 |
24 | x = DepthwiseConv2D(kernel_size=(1, kernel), padding='same', use_bias=False)(x)
25 | x = get_post(x)
26 | x = MaxPool2D(pool_size=(downsample, 1),
27 | strides=(downsample, 1))(x) # Separable pooling
28 |
29 | x = DepthwiseConv2D(kernel_size=(kernel, 1),
30 | padding='same',
31 | use_bias=False)(x)
32 | x = get_post(x)
33 |
34 | x = Conv2D(ch_out,
35 | kernel_size=(downsample, 1),
36 | strides=(1, downsample),
37 | padding='same',
38 | use_bias=False)(x)
39 | x = get_post(x)
40 |
41 | return x
42 |
43 |
44 | def Effnet(input_shape, nb_classes, n_blocks=2,
45 | initial_filters=16, filter_growth=2.0, dropout=0.5, kernel=5, downsample=2, pool=None,
46 | include_top='flatten', weights=None):
47 |
48 | if getattr(kernel, '__iter__', None):
49 | assert kernel[0] == kernel[1]
50 | kernel = kernel[0]
51 |
52 | x_in = Input(shape=input_shape)
53 | x = x_in
54 |
55 | for block_no in range(n_blocks):
56 | filters_in = int(initial_filters*(filter_growth**block_no))
57 | filters_out = int(initial_filters*(filter_growth**(block_no+1)))
58 | strides = (2, 2) if block_no == 0 else (1, 1) # reduce RAM
59 | x = get_block(x, filters_in, filters_out,
60 | kernel=kernel, downsample=downsample, strides=strides)
61 |
62 | if include_top == 'flatten':
63 | x = Flatten()(x)
64 | x = Dropout(dropout)(x)
65 | x = Dense(nb_classes, activation='softmax')(x)
66 | elif include_top == 'conv':
67 | # MobileNetv1 style
68 | x = GlobalAveragePooling2D()(x)
69 | shape = (1, 1, filters_out)
70 | x = Reshape(shape)(x)
71 | x = Dropout(dropout)(x)
72 | x = Conv2D(nb_classes, (1, 1), padding='same')(x)
73 | x = Activation('softmax', name='act_softmax')(x)
74 | x = Reshape((nb_classes,))(x)
75 |
76 | model = Model(inputs=x_in, outputs=x)
77 |
78 | if weights is not None:
79 | model.load_weights(weights, by_name=True)
80 |
81 | return model
82 |
83 | def build_model(frames=31, bands=60, channels=1, n_classes=10, **kwargs):
84 | shape = (bands, frames, channels)
85 |
86 | return Effnet(shape, nb_classes=n_classes, **kwargs)
87 |
88 | def main():
89 | m = build_model()
90 | m.summary()
91 | m.save('effnet.hdf5')
92 |
93 | if __name__ == '__main__':
94 | main()
95 |
96 |
97 |
--------------------------------------------------------------------------------
/microesc/models/ldcnn.py:
--------------------------------------------------------------------------------
1 |
2 | import keras
3 |
4 | def ldcnn_head(input, head_name, filters=80, L=57, W=6):
5 | def n(base):
6 | return base+'_'+head_name
7 |
8 | from keras.layers import Convolution2D, Flatten, MaxPooling2D, BatchNormalization
9 |
10 | x = input
11 | x = Convolution2D(filters, (L,1), activation='relu', name=n('SFCL1'))(x)
12 | x = BatchNormalization()(x)
13 | x = Convolution2D(filters, (1,W), activation='relu', name=n('SFCL2'))(x)
14 | x = BatchNormalization()(x)
15 | x = MaxPooling2D(pool_size=(4,3), strides=(1,3), name=n('MPL1'))(x)
16 | x = Convolution2D(filters, (1,3), dilation_rate=(2,2), name=n('DCL'))(x)
17 | x = MaxPooling2D(pool_size=(1,3), strides=(1,3), name=n('MPL2'))(x)
18 | x = Flatten(name=n('flatten'))(x)
19 | return x
20 |
21 |
22 | def ldcnn(bands=60, frames=31, n_classes=10,
23 | filters=80, L=57, W=6, fully_connected=5000, dropout=0.25):
24 |
25 | """
26 | LD-CNN: A Lightweight Dilated Convolutional Neural Network for Environmental Sound Classification
27 |
28 | http://epubs.surrey.ac.uk/849351/1/LD-CNN.pdf
29 | """
30 |
31 | from keras.models import Sequential, Model
32 | from keras.layers import Dense, Dropout, Activation, Input, Concatenate
33 | from keras.regularizers import l2
34 | import keras.layers
35 |
36 | input_shape = (bands, frames, 1)
37 |
38 | def head(input, name):
39 | return ldcnn_head(input, name, filters, L, W)
40 |
41 | mel_input = Input(shape=input_shape, name='mel_input')
42 | delta_input = Input(shape=input_shape, name='delta_input')
43 | heads = [
44 | head(mel_input, 'mel'),
45 | head(delta_input, 'delta')
46 | ]
47 | m = keras.layers.add(heads, name='FSL')
48 | m = Dropout(dropout)(m)
49 | m = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(m)
50 | m = Dropout(dropout)(m)
51 | m = Dense(n_classes, activation='softmax')(m)
52 |
53 | model = Model([mel_input, delta_input], m)
54 |
55 | return model
56 |
57 |
58 |
59 | def ldcnn_nodelta(bands=60, frames=31, n_classes=10,
60 | filters=80, L=57, W=6, channels=1, fully_connected=5000, dropout=0.5):
61 | """Variation of LD-CNN with only mel input (no deltas)"""
62 |
63 | from keras.models import Sequential, Model
64 | from keras.layers import Dense, Dropout, Activation, Input, Concatenate
65 | from keras.regularizers import l2
66 |
67 | input_shape = (bands, frames, channels)
68 | input = Input(shape=input_shape, name='mel_input')
69 |
70 | m = ldcnn_head(input, 'mel', filters, L, W)
71 | #m = Dropout(dropout)(m)
72 |
73 | m = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(m)
74 | m = Dropout(dropout)(m)
75 |
76 | m = Dense(n_classes, kernel_regularizer=l2(0.001))(m)
77 | m = Dropout(dropout)(m)
78 | m = Activation('softmax')(m)
79 |
80 | model = Model(input, m)
81 | return model
82 |
83 |
--------------------------------------------------------------------------------
/microesc/models/piczakcnn.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | # NOTE: log-melspectrogram and delta log-melspec
4 | def build_model(bands=60, frames=41, channels=2, n_labels=10,
5 | fc=5000, dropout=0.5):
6 |
7 | """
8 | Implements the short-segment CNN from
9 |
10 | ENVIRONMENTAL SOUND CLASSIFICATION WITH CONVOLUTIONAL NEURAL NETWORKS
11 | Karol J. Piczak, 2015.
12 | https://karol.piczak.com/papers/Piczak2015-ESC-ConvNet.pdf
13 | """
14 |
15 | from keras.models import Sequential
16 | from keras.layers import Dense, Dropout, Activation, Flatten
17 | from keras.layers import Convolution2D, MaxPooling2D
18 | from keras.regularizers import l2
19 |
20 | input_shape = (bands, frames, channels)
21 |
22 | model = Sequential([
23 | Convolution2D(80, (bands-3,6), strides=(1,1), input_shape=input_shape),
24 | MaxPooling2D((4,3), strides=(1,3)),
25 | Convolution2D(80, (1,3)),
26 | MaxPooling2D((1,3), strides=(1,3)),
27 | #Flatten(),
28 | Dense(fc, activation='relu'),
29 | Dropout(dropout),
30 | Dense(fc, activation='relu'),
31 | Dropout(dropout),
32 | Dense(n_labels, activation='softmax'),
33 | ])
34 |
35 | return model
36 |
37 |
38 | def main():
39 | m = build_model()
40 | m.save('piczak.orig.hdf5')
41 |
42 | m.summary()
43 |
44 | if __name__ == '__main__':
45 | main()
46 |
--------------------------------------------------------------------------------
/microesc/models/sbcnn.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import numpy as np
4 | from keras.models import Sequential
5 | from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
6 | from keras.layers import Convolution2D, MaxPooling2D, SeparableConv2D
7 | from keras.regularizers import l2
8 |
9 |
10 | def build_model(frames=128, bands=128, channels=1, n_classes=10,
11 | conv_size=(5,5), conv_block='conv',
12 | downsample_size=(4,2),
13 | fully_connected=64,
14 | n_stages=None, n_blocks_per_stage=None,
15 | filters=24, kernels_growth=2,
16 | dropout=0.5,
17 | use_strides=False):
18 | """
19 | Implements SB-CNN model from
20 | Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification
21 | Salamon and Bello, 2016.
22 | https://arxiv.org/pdf/1608.04363.pdf
23 |
24 | Based on https://gist.github.com/jaron/5b17c9f37f351780744aefc74f93d3ae
25 | but parameters are changed back to those of the original paper authors,
26 | and added Batch Normalization
27 | """
28 | Conv2 = SeparableConv2D if conv_block == 'depthwise_separable' else Convolution2D
29 | assert conv_block in ('conv', 'depthwise_separable')
30 | kernel = conv_size
31 | if use_strides:
32 | strides = downsample_size
33 | pool = (1, 1)
34 | else:
35 | strides = (1, 1)
36 | pool = downsample_size
37 |
38 | block1 = [
39 | Convolution2D(filters, kernel, padding='same', strides=strides,
40 | input_shape=(bands, frames, channels)),
41 | BatchNormalization(),
42 | MaxPooling2D(pool_size=pool),
43 | Activation('relu'),
44 | ]
45 | block2 = [
46 | Conv2(filters*kernels_growth, kernel, padding='same', strides=strides),
47 | BatchNormalization(),
48 | MaxPooling2D(pool_size=pool),
49 | Activation('relu'),
50 | ]
51 | block3 = [
52 | Conv2(filters*kernels_growth, kernel, padding='valid', strides=strides),
53 | BatchNormalization(),
54 | Activation('relu'),
55 | ]
56 | backend = [
57 | Flatten(),
58 |
59 | Dropout(dropout),
60 | Dense(fully_connected, kernel_regularizer=l2(0.001)),
61 | Activation('relu'),
62 |
63 | Dropout(dropout),
64 | Dense(n_classes, kernel_regularizer=l2(0.001)),
65 | Activation('softmax'),
66 | ]
67 | layers = block1 + block2 + block3 + backend
68 | model = Sequential(layers)
69 | return model
70 |
71 |
72 |
--------------------------------------------------------------------------------
/microesc/models/skm.py:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | import numpy as np
5 | from keras.models import Sequential
6 | from keras.layers import Dense, Dropout, Activation, Flatten
7 | from keras.layers import Convolution2D, MaxPooling2D, SeparableConv2D
8 | from keras.regularizers import l2
9 |
10 |
11 | def build_model(frames=172, shingles=8, bands=40, channels=1, codebook=2000):
12 | """
13 | Implements convolution part of SKM model from
14 |
15 | UNSUPERVISED FEATURE LEARNING FOR URBAN SOUND CLASSIFICATION
16 | Justin Salamon and Juan Pablo Bello, 2015
17 | """
18 |
19 | input_shape=(bands, frames, channels)
20 | kernel = (bands, shingles)
21 |
22 | model = Sequential([
23 | Convolution2D(codebook, kernel, strides=(1,shingles) , padding='same', activation=None, input_shape=input_shape)
24 | ])
25 |
26 | return model
27 |
28 | def main():
29 | print('original')
30 | m = build_model()
31 | m.summary()
32 |
33 |
34 |
35 | if __name__ == '__main__':
36 | main()
37 |
38 |
--------------------------------------------------------------------------------
/microesc/models/squeezenet.py:
--------------------------------------------------------------------------------
1 |
2 | """
3 | SqueezeNet v1.1 implementation
4 | from "SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size"
5 | https://arxiv.org/abs/1602.07360
6 |
7 | Based on
8 | https://github.com/rcmalli/keras-squeezenet/blob/master/keras_squeezenet/squeezenet.py
9 | """
10 |
11 | import keras
12 | from keras.layers import Convolution2D, Activation
13 |
14 |
15 | def fire_module(x, fire_id, squeeze=16, expand=64):
16 | sq1x1 = "squeeze1x1"
17 | exp1x1 = "expand1x1"
18 | exp3x3 = "expand3x3"
19 | relu = "relu_"
20 | s_id = 'fire' + str(fire_id) + '/'
21 |
22 | from keras.layers import concatenate
23 |
24 | x = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
25 | x = Activation('relu', name=s_id + relu + sq1x1)(x)
26 |
27 | left = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)
28 | left = Activation('relu', name=s_id + relu + exp1x1)(left)
29 | right = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)
30 | right = Activation('relu', name=s_id + relu + exp3x3)(right)
31 |
32 | assert keras.backend.image_data_format() == 'channels_last'
33 | x = concatenate([left, right], axis=3, name=s_id + 'concat')
34 | return x
35 |
36 | def build_model(frames=32, bands=32, channels=1, n_classes=10,
37 | dropout=0.5,
38 | n_stages=3,
39 | modules_per_stage=2,
40 | initial_filters=64,
41 | squeeze_ratio=0.2,
42 | pool = (2, 2),
43 | kernel = (3, 3),
44 | stride_f = 2, stride_t = 2):
45 |
46 | from keras.models import Model
47 | from keras.layers import Input, GlobalAveragePooling2D, Dropout, MaxPooling2D
48 |
49 | input_shape = (bands, frames, channels)
50 | img_input = keras.layers.Input(shape=input_shape)
51 | x = Convolution2D(initial_filters, (3, 3), strides=(stride_f, stride_t),
52 | padding='valid', name='conv1')(img_input)
53 | x = Activation('relu', name='relu_conv1')(x)
54 |
55 | module_idx = 0
56 | for stage_no in range(1, n_stages):
57 | expand = initial_filters*stage_no
58 | squeeze = int(expand * squeeze_ratio)
59 | x = MaxPooling2D(pool_size=pool, strides=(stride_f, stride_t), name='pool'+str(stage_no))(x)
60 | for module_no in range(modules_per_stage):
61 | x = fire_module(x, fire_id=module_idx, squeeze=squeeze, expand=expand)
62 | module_idx += 1
63 |
64 | x = Dropout(dropout, name='drop9')(x)
65 | x = Convolution2D(n_classes, (1, 1), padding='valid', name='topconv')(x)
66 | x = Activation('relu', name='relu_topconv')(x)
67 | x = GlobalAveragePooling2D()(x)
68 | x = Activation('softmax', name='loss')(x)
69 |
70 | model = keras.Model(img_input, x)
71 | return model
72 |
--------------------------------------------------------------------------------
/microesc/perf.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import pandas
4 | import numpy
5 | import matplotlib
6 | from matplotlib import pyplot as plt
7 |
8 |
9 | def plot():
10 | models = pandas.read_csv('models.csv')
11 |
12 | fig, ax = plt.subplots(1)
13 | print(models.head(10))
14 | print(models.index)
15 |
16 | #models.plot(kind='scatter', ax=ax, x='parameters', y='accuracy')
17 |
18 | n_labels = len(models['name'])
19 |
20 | colors = matplotlib.cm.rainbow(numpy.linspace(0, 1, n_labels)) # create a bunch of colors
21 |
22 | for i, r in models.iterrows():
23 | ax.plot(r['parameters']/1000, r['accuracy'], 'o', label=r['name'],
24 | markersize=5, color=colors[i], linewidth=0.1)
25 |
26 | ax.legend(loc='best')
27 |
28 | fig.savefig('perf.png')
29 |
30 |
31 | if __name__ == '__main__':
32 | plot()
33 |
--------------------------------------------------------------------------------
/microesc/settings.py:
--------------------------------------------------------------------------------
1 |
2 | default_feature_settings = dict(
3 | feature='mels',
4 | samplerate=16000,
5 | n_mels=32,
6 | fmin=0,
7 | fmax=8000,
8 | n_fft=512,
9 | hop_length=256,
10 | augmentations=5,
11 | )
12 |
13 | default_training_settings = dict(
14 | epochs=50,
15 | batch=50,
16 | train_samples=36000,
17 | val_samples=3000,
18 | augment=0,
19 | learning_rate=0.01,
20 | nesterov_momentum=0.9,
21 | )
22 |
23 | default_model_settings = dict(
24 | model='sbcnn',
25 | frames=72,
26 | conv_block='conv',
27 | conv_size='5x5',
28 | downsample_size='4x2',
29 | filters=24,
30 | n_stages=3,
31 | n_blocks_per_stage=1,
32 | dropout=0.5,
33 | voting='mean',
34 | voting_overlap=0.0,
35 | normalize='meanstd',
36 | fully_connected=64,
37 | )
38 |
39 | names = set().union(*[
40 | default_feature_settings.keys(),
41 | default_training_settings.keys(),
42 | default_model_settings.keys(),
43 | ])
44 | def populate_defaults():
45 | s = {}
46 | for n in names:
47 | v = default_model_settings.get(n, None)
48 | if v is None:
49 | v = default_training_settings.get(n, None)
50 | if v is None:
51 | v = default_feature_settings.get(n, None)
52 | s[n] = v
53 | return s
54 |
55 | defaults = populate_defaults()
56 |
57 | def test_no_overlapping_settings():
58 | f = default_feature_settings.keys()
59 | t = default_training_settings.keys()
60 | m = default_model_settings.keys()
61 | assert len(names) == len(f) + len(t) + len(m)
62 |
63 | test_no_overlapping_settings()
64 |
65 | def parse_dimensions(s):
66 | pieces = s.split('x')
67 | return tuple( int(d) for d in pieces )
68 |
69 | # Functions that convert string representation to actual setting data
70 | parsers = {
71 | 'pool': parse_dimensions,
72 | 'kernel': parse_dimensions,
73 | 'conv_size': parse_dimensions,
74 | 'downsample_size': parse_dimensions,
75 | }
76 |
77 | def test_parse_dimensions():
78 | valid_examples = [
79 | ('3x3', (3,3)),
80 | ('4x2', (4,2))
81 | ]
82 | for inp, expect in valid_examples:
83 | out = parse_dimensions(inp)
84 | assert out == expect, (out, '!=', expect)
85 |
86 | test_parse_dimensions()
87 |
88 | def load_settings(args):
89 | settings = {}
90 | for key in names:
91 | string = args.get(key, defaults[key])
92 | parser = parsers.get(key, lambda x: x)
93 | value = parser(string)
94 | settings[key] = value
95 |
96 | return settings
97 |
98 |
99 | def test_settings_empty():
100 | load_settings({})
101 |
102 | test_settings_empty()
103 |
104 |
105 | def add_arguments(parser):
106 | a = parser.add_argument
107 |
108 | for name in names:
109 | data_type = type(defaults[name])
110 | default = None
111 | a('--{}'.format(name), default=default, type=data_type,
112 | help='%(default)s'
113 | )
114 |
115 |
116 |
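A small standalone sketch of how the pieces above combine: string-valued overrides go through the parsers table, and missing keys fall back to the merged defaults (assumes the microesc package is on the import path):

# Overrides are parsed where a parser is registered; everything else
# falls back to the defaults dicts defined above.
from microesc import settings

s = settings.load_settings({'conv_size': '3x3', 'n_mels': 60})
print(s['conv_size'])        # (3, 3)  - parsed by parse_dimensions
print(s['downsample_size'])  # (4, 2)  - default '4x2', also parsed
print(s['frames'])           # 72      - taken directly from the defaults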
--------------------------------------------------------------------------------
/microesc/variations.py:
--------------------------------------------------------------------------------
1 |
2 | import uuid
3 | import os.path
4 |
5 | import sklearn
6 | import pandas
7 |
8 | import models, stm32convert
9 |
10 | def sbcnn_generator(n_iter=400, random_state=1):
11 |
12 | from sklearn.model_selection import ParameterSampler
13 |
14 | params = dict(
15 | kernel_t = range(3, 10, 2),
16 | kernel_f = range(3, 10, 2),
17 | pool_t = range(2, 5),
18 | pool_f = range(2, 5),
19 | kernels_start = range(16, 64),
20 | fully_connected = range(16, 128),
21 | )
22 |
23 | sampler = ParameterSampler(params, n_iter=n_iter, random_state=random_state)
24 |
25 | out_models = []
26 | out_total_params = []
27 | for p in sampler:
28 | s = {
29 | 'model': 'sbcnn',
30 | 'frames': 31,
31 | 'n_mels': 60,
32 | 'samplerate': 22050,
33 | }
34 |
35 | pool = p['pool_f'], p['pool_t']
36 | kernel = p['kernel_f'], p['kernel_t']
37 | for k, v in p.items():
38 | s[k] = v
39 | s['pool'] = pool
40 | s['kernel'] = kernel
41 |
42 | yield p, s
43 |
44 |
45 | def generate_models():
46 |
47 | gen = sbcnn_generator()
48 | data = {
49 | 'model_path': [],
50 | 'gen_path': [],
51 | 'id': [],
52 | }
53 | for out in gen:
54 | model = None
55 |
56 | try:
57 | params, settings = out
58 | model = models.build(settings.copy())
59 | except ValueError as e:
60 | print('Error:', e)
61 | continue
62 |
63 |
64 | # Store parameters
65 | for k, v in params.items():
66 | if data.get(k) is None:
67 | data[k] = []
68 | data[k].append(v)
69 |
70 | model_id = str(uuid.uuid4())
71 | out_dir = os.path.join('scan', model_id)
72 | os.makedirs(out_dir)
73 |
74 | model_path = os.path.join(out_dir, 'model.orig.hdf5')
75 | out_path = os.path.join(out_dir, 'gen')
76 |
77 | # Store model
78 | model.save(model_path)
79 | stats = stm32convert.generatecode(model_path, out_path,
80 | name='network', model_type='keras', compression=None)
81 |
82 | # Store model info
83 | data['model_path'].append(model_path)
84 | data['gen_path'].append(out_path)
85 | data['id'].append(model_id)
86 |
87 | for k, v in stats.items():
88 | if data.get(k) is None:
89 | data[k] = []
90 | data[k].append(v)
91 |
92 | df = pandas.DataFrame(data)
93 | return df
94 |
95 | def main():
96 |
97 | df = generate_models()
98 | df.to_csv('scan.csv')
99 |
100 |
101 | if __name__ == '__main__':
102 | main()
103 |
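The random search above is driven by scikit-learn's ParameterSampler. A standalone sketch of what it yields, using the same ranges as sbcnn_generator (illustrative; the drawn values depend on the random state):

# Each iteration yields one dict with a value drawn from every range.
from sklearn.model_selection import ParameterSampler

params = dict(
    kernel_t=range(3, 10, 2),
    kernel_f=range(3, 10, 2),
    pool_t=range(2, 5),
    pool_f=range(2, 5),
    kernels_start=range(16, 64),
    fully_connected=range(16, 128),
)

for sample in ParameterSampler(params, n_iter=3, random_state=1):
    print(sample)  # keys: kernel_t, kernel_f, pool_t, pool_f, kernels_start, fully_connected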
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 |
2 | from microesc import train, stats, common
3 |
4 | def main():
5 | settings = common.load_experiment('experiments', 'ldcnn20k60')
6 |
7 | def build():
8 | return train.sb_cnn(settings)
9 |
10 | m = build()
11 | m.summary()
12 | m.save('model.wip.hdf5')
13 |
14 | s = settings
15 | shape = (s['n_mels'], s['frames'], 1)
16 | model_stats = stats.analyze_model(build, [shape], n_classes=10)
17 |
18 | flops, params = model_stats
19 | inference_flops = { name: v for name, v in flops.items() if not stats.is_training_scope(name) }
20 | for name, flop in inference_flops.items():
21 | print(name, flop)
22 |
23 |
24 |
25 | if __name__ == '__main__':
26 | main()
27 |
--------------------------------------------------------------------------------
/models.csv:
--------------------------------------------------------------------------------
1 | model,conv_block,n_stages,conv_size,downsample_size,filters,modelcheck,nickname
2 | sbcnn,conv,3,5x5,3x2,24,skip,Baseline
3 | sbcnn,depthwise_separable,3,5x5,3x2,24,,Baseline-DS
4 | strided,conv,3,5x5,2x2,22,,Stride
5 | strided,depthwise_separable,3,5x5,2x2,24,,Stride-DS-24
6 | strided,effnet,3,5x5,2x2,22,,Stride-Effnet
7 | strided,depthwise_separable,4,3x3,2x2,24,,Stride-DS-3x3
8 | strided,bottleneck_ds,3,5x5,2x2,22,,Stride-BTLN-DS
9 | strided,depthwise_separable,3,5x5,2x2,20,,Stride-DS-20
10 | strided,depthwise_separable,3,5x5,2x2,16,,Stride-DS-16
11 | strided,depthwise_separable,3,5x5,2x2,12,,Stride-DS-12
12 |
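The conv_size and downsample_size columns use the same 'AxB' string format that microesc/settings.py parses. A hedged sketch of loading this table (assumes the microesc package is importable and the CSV is in the working directory):

# Read the model-variation table and parse the dimension columns into tuples.
import pandas
from microesc import settings

models = pandas.read_csv('models.csv')
for col in ('conv_size', 'downsample_size'):
    models[col] = models[col].apply(settings.parse_dimensions)

print(models[['nickname', 'conv_size', 'downsample_size', 'filters']])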
--------------------------------------------------------------------------------
/models16k.csv:
--------------------------------------------------------------------------------
1 | model,conv_block,n_stages,conv_size,downsample_size,filters,modelcheck,nickname
2 | sbcnn,conv,3,5x5,2x2,24,skip,Baseline
3 | sbcnn,depthwise_separable,3,5x5,2x2,24,skip,Baseline-DS
4 | strided,conv,3,5x5,2x2,22,,Stride
5 | strided,depthwise_separable,3,5x5,2x2,24,,Stride-DS-24
6 | strided,depthwise_separable,4,3x3,2x2,24,,Stride-DS-3x3
7 |
--------------------------------------------------------------------------------
/preprocess.py:
--------------------------------------------------------------------------------
1 |
2 | from microesc import preprocess
3 | preprocess.main()
4 |
--------------------------------------------------------------------------------
/presentation/Makefile:
--------------------------------------------------------------------------------
1 |
2 | slides:
3 | pandoc -t revealjs -s presentation.md -o slides.html --slide-level=2 --mathml
4 |
5 | slides.pdf: presentation.md
6 | pandoc -t beamer -s presentation.md -o slides.pdf --slide-level=2 --mathml
7 |
--------------------------------------------------------------------------------
/presentation/img/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/cover.png
--------------------------------------------------------------------------------
/presentation/img/cpu-efficiency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/cpu-efficiency.png
--------------------------------------------------------------------------------
/presentation/img/fail-dropout.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/fail-dropout.png
--------------------------------------------------------------------------------
/presentation/img/fail-truncation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/fail-truncation.png
--------------------------------------------------------------------------------
/presentation/img/models-list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/models-list.png
--------------------------------------------------------------------------------
/presentation/img/nmbu_logo_eng_rgb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/nmbu_logo_eng_rgb.jpg
--------------------------------------------------------------------------------
/presentation/img/nmbu_logo_eng_rgb_trans.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/nmbu_logo_eng_rgb_trans.png
--------------------------------------------------------------------------------
/presentation/img/noise-monitoring.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/noise-monitoring.jpg
--------------------------------------------------------------------------------
/presentation/img/results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/results.png
--------------------------------------------------------------------------------
/presentation/img/soundsensing-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/soundsensing-logo.png
--------------------------------------------------------------------------------
/presentation/img/soundsensing-logo.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/soundsensing-logo.xcf
--------------------------------------------------------------------------------
/presentation/img/stoykart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/stoykart.png
--------------------------------------------------------------------------------
/presentation/img/training-settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/training-settings.png
--------------------------------------------------------------------------------
/presentation/img/unknown-class-tradeoffs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/unknown-class-tradeoffs.png
--------------------------------------------------------------------------------
/presentation/img/unknown-class.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/unknown-class.png
--------------------------------------------------------------------------------
/presentation/img/xcubeai.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/img/xcubeai.png
--------------------------------------------------------------------------------
/presentation/notes.md:
--------------------------------------------------------------------------------
1 |
2 | 20-30 minutes.
3 | 20 slides?
4 |
5 | 6 sections of ~5 minutes each
6 |
7 | # TODO
8 |
9 | - Go through presentation once
10 | - Add notes to all slides
11 |
12 |
13 | # Scope
14 |
15 | - Problem definition
16 | - Urbansound8k dataset
17 | - Melspectrogram
18 | - CNN audio model
19 | - SB-CNN model
20 |
21 | Out-of-scope
22 |
23 | - Teaching basic Machine Learning
24 | - Teaching CNN?
25 |
26 | # Results
27 |
28 | - Bottleneck and EffNet performed poorly
29 | - Practical speedup not linear with MACC
30 | - Striding on input means downsampling.
31 | Could a smaller feature representation perform similarly?
32 |
33 | Remarks
34 |
35 | - GPU utilization was poor, probably because the model is too small
36 |
37 | # Challenges & Solutions
38 |
39 | RAM, CPU, FLASH
40 |
41 | - Reduce the input feature representation: fewer mels, fewer time frames.
42 | Adopted from LD-CNN: 31 frames @ 22050 Hz (720 ms)
43 | - Use CNNs instead of DNNs.
44 | Shown to give higher performance and better parameter efficiency
45 | - Reduce the use of overlapping analysis windows.
46 | No overlap here; existing work uses maximum overlap, 100x the CPU time.
47 | - Apply more efficient convolutional blocks
48 | Depthwise separable, Spatially Separable, Bottleneck, EffNet (see the sketch after these notes)
49 |
50 |
51 | ## Misc
52 |
53 | SB-CNN paper cited 280+ times
54 |
55 |
56 |
57 |
58 |
59 |
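The efficient convolutional blocks mentioned under "Challenges & Solutions" replace a full kxk convolution with cheaper factorizations. As a rough illustration (not the project's exact block definitions, which live in the model code), a depthwise-separable block in Keras looks roughly like this:

# Illustrative depthwise-separable block: a kxk depthwise convolution
# (spatial filtering, per channel) followed by a 1x1 pointwise convolution
# (channel mixing). Not the project's exact implementation.
from keras.layers import DepthwiseConv2D, Conv2D, Activation, BatchNormalization

def depthwise_separable_block(x, filters, kernel=(5, 5)):
    x = DepthwiseConv2D(kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x

For a 5x5 convolution with 24 input and 24 output channels, the standard block costs 25*24*24 = 14400 multiply-accumulates per output position, while the separable version costs 25*24 + 24*24 = 1176, roughly 12x fewer; as the notes observe, the practical speedup is smaller than this MACC ratio suggests.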
--------------------------------------------------------------------------------
/presentation/reveal.js/css/print/pdf.css:
--------------------------------------------------------------------------------
1 | /**
2 | * This stylesheet is used to print reveal.js
3 | * presentations to PDF.
4 | *
5 | * https://github.com/hakimel/reveal.js#pdf-export
6 | */
7 |
8 | * {
9 | -webkit-print-color-adjust: exact;
10 | }
11 |
12 | body {
13 | margin: 0 auto !important;
14 | border: 0;
15 | padding: 0;
16 | float: none !important;
17 | overflow: visible;
18 | }
19 |
20 | html {
21 | width: 100%;
22 | height: 100%;
23 | overflow: visible;
24 | }
25 |
26 | /* Remove any elements not needed in print. */
27 | .nestedarrow,
28 | .reveal .controls,
29 | .reveal .progress,
30 | .reveal .playback,
31 | .reveal.overview,
32 | .fork-reveal,
33 | .share-reveal,
34 | .state-background {
35 | display: none !important;
36 | }
37 |
38 | h1, h2, h3, h4, h5, h6 {
39 | text-shadow: 0 0 0 #000 !important;
40 | }
41 |
42 | .reveal pre code {
43 | overflow: hidden !important;
44 | font-family: Courier, 'Courier New', monospace !important;
45 | }
46 |
47 | ul, ol, div, p {
48 | visibility: visible;
49 | position: static;
50 | width: auto;
51 | height: auto;
52 | display: block;
53 | overflow: visible;
54 | margin: auto;
55 | }
56 | .reveal {
57 | width: auto !important;
58 | height: auto !important;
59 | overflow: hidden !important;
60 | }
61 | .reveal .slides {
62 | position: static;
63 | width: 100% !important;
64 | height: auto !important;
65 | zoom: 1 !important;
66 |
67 | left: auto;
68 | top: auto;
69 | margin: 0 !important;
70 | padding: 0 !important;
71 |
72 | overflow: visible;
73 | display: block;
74 |
75 | perspective: none;
76 | perspective-origin: 50% 50%;
77 | }
78 |
79 | .reveal .slides .pdf-page {
80 | position: relative;
81 | overflow: hidden;
82 | z-index: 1;
83 |
84 | page-break-after: always;
85 | }
86 |
87 | .reveal .slides section {
88 | visibility: visible !important;
89 | display: block !important;
90 | position: absolute !important;
91 |
92 | margin: 0 !important;
93 | padding: 0 !important;
94 | box-sizing: border-box !important;
95 | min-height: 1px;
96 |
97 | opacity: 1 !important;
98 |
99 | transform-style: flat !important;
100 | transform: none !important;
101 | }
102 |
103 | .reveal section.stack {
104 | position: relative !important;
105 | margin: 0 !important;
106 | padding: 0 !important;
107 | page-break-after: avoid !important;
108 | height: auto !important;
109 | min-height: auto !important;
110 | }
111 |
112 | .reveal img {
113 | box-shadow: none;
114 | }
115 |
116 | .reveal .roll {
117 | overflow: visible;
118 | line-height: 1em;
119 | }
120 |
121 | /* Slide backgrounds are placed inside of their slide when exporting to PDF */
122 | .reveal .slide-background {
123 | display: block !important;
124 | position: absolute;
125 | top: 0;
126 | left: 0;
127 | width: 100%;
128 | height: 100%;
129 | z-index: auto !important;
130 | }
131 |
132 | /* Display slide speaker notes when 'showNotes' is enabled */
133 | .reveal.show-notes {
134 | max-width: none;
135 | max-height: none;
136 | }
137 | .reveal .speaker-notes-pdf {
138 | display: block;
139 | width: 100%;
140 | height: auto;
141 | max-height: none;
142 | top: auto;
143 | right: auto;
144 | bottom: auto;
145 | left: auto;
146 | z-index: 100;
147 | }
148 |
149 | /* Layout option which makes notes appear on a separate page */
150 | .reveal .speaker-notes-pdf[data-layout="separate-page"] {
151 | position: relative;
152 | color: inherit;
153 | background-color: transparent;
154 | padding: 20px;
155 | page-break-after: always;
156 | border: 0;
157 | }
158 |
159 | /* Display slide numbers when 'slideNumber' is enabled */
160 | .reveal .slide-number-pdf {
161 | display: block;
162 | position: absolute;
163 | font-size: 14px;
164 | }
165 |
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/README.md:
--------------------------------------------------------------------------------
1 | ## Dependencies
2 |
3 | Themes are written using Sass to keep things modular and reduce the need for repeated selectors across files. Make sure that you have the reveal.js development environment including the Grunt dependencies installed before proceeding: https://github.com/hakimel/reveal.js#full-setup
4 |
5 | ## Creating a Theme
6 |
7 | To create your own theme, start by duplicating a ```.scss``` file in [/css/theme/source](https://github.com/hakimel/reveal.js/blob/master/css/theme/source). It will be automatically compiled by Grunt from Sass to CSS (see the [Gruntfile](https://github.com/hakimel/reveal.js/blob/master/Gruntfile.js)) when you run `npm run build -- css-themes`.
8 |
9 | Each theme file does four things in the following order:
10 |
11 | 1. **Include [/css/theme/template/mixins.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/mixins.scss)**
12 | Shared utility functions.
13 |
14 | 2. **Include [/css/theme/template/settings.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/settings.scss)**
15 | Declares a set of custom variables that the template file (step 4) expects. Can be overridden in step 3.
16 |
17 | 3. **Override**
18 | This is where you override the default theme. Either by specifying variables (see [settings.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/settings.scss) for reference) or by adding any selectors and styles you please.
19 |
20 | 4. **Include [/css/theme/template/theme.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/theme.scss)**
21 | The template theme file which will generate final CSS output based on the currently defined variables.
22 |
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/beige.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Beige theme for reveal.js.
3 | *
4 | * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
5 | */
6 |
7 |
8 | // Default mixins and settings -----------------
9 | @import "../template/mixins";
10 | @import "../template/settings";
11 | // ---------------------------------------------
12 |
13 |
14 |
15 | // Include theme-specific fonts
16 | @import url(../../lib/font/league-gothic/league-gothic.css);
17 | @import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
18 |
19 |
20 | // Override theme settings (see ../template/settings.scss)
21 | $mainColor: #333;
22 | $headingColor: #333;
23 | $headingTextShadow: none;
24 | $backgroundColor: #f7f3de;
25 | $linkColor: #8b743d;
26 | $linkColorHover: lighten( $linkColor, 20% );
27 | $selectionBackgroundColor: rgba(79, 64, 28, 0.99);
28 | $heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15);
29 |
30 | // Background generator
31 | @mixin bodyBackground() {
32 | @include radial-gradient( rgba(247,242,211,1), rgba(255,255,255,1) );
33 | }
34 |
35 |
36 |
37 | // Theme template ------------------------------
38 | @import "../template/theme";
39 | // ---------------------------------------------
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/black.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Black theme for reveal.js. This is the opposite of the 'white' theme.
3 | *
4 | * By Hakim El Hattab, http://hakim.se
5 | */
6 |
7 |
8 | // Default mixins and settings -----------------
9 | @import "../template/mixins";
10 | @import "../template/settings";
11 | // ---------------------------------------------
12 |
13 |
14 | // Include theme-specific fonts
15 | @import url(../../lib/font/source-sans-pro/source-sans-pro.css);
16 |
17 |
18 | // Override theme settings (see ../template/settings.scss)
19 | $backgroundColor: #222;
20 |
21 | $mainColor: #fff;
22 | $headingColor: #fff;
23 |
24 | $mainFontSize: 42px;
25 | $mainFont: 'Source Sans Pro', Helvetica, sans-serif;
26 | $headingFont: 'Source Sans Pro', Helvetica, sans-serif;
27 | $headingTextShadow: none;
28 | $headingLetterSpacing: normal;
29 | $headingTextTransform: uppercase;
30 | $headingFontWeight: 600;
31 | $linkColor: #42affa;
32 | $linkColorHover: lighten( $linkColor, 15% );
33 | $selectionBackgroundColor: lighten( $linkColor, 25% );
34 |
35 | $heading1Size: 2.5em;
36 | $heading2Size: 1.6em;
37 | $heading3Size: 1.3em;
38 | $heading4Size: 1.0em;
39 |
40 | section.has-light-background {
41 | &, h1, h2, h3, h4, h5, h6 {
42 | color: #222;
43 | }
44 | }
45 |
46 |
47 | // Theme template ------------------------------
48 | @import "../template/theme";
49 | // ---------------------------------------------
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/blood.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Blood theme for reveal.js
3 | * Author: Walther http://github.com/Walther
4 | *
5 | * Designed to be used with highlight.js theme
6 | * "monokai_sublime.css" available from
7 | * https://github.com/isagalaev/highlight.js/
8 | *
9 | * For other themes, change $codeBackground accordingly.
10 | *
11 | */
12 |
13 | // Default mixins and settings -----------------
14 | @import "../template/mixins";
15 | @import "../template/settings";
16 | // ---------------------------------------------
17 |
18 | // Include theme-specific fonts
19 |
20 | @import url(https://fonts.googleapis.com/css?family=Ubuntu:300,700,300italic,700italic);
21 |
22 | // Colors used in the theme
23 | $blood: #a23;
24 | $coal: #222;
25 | $codeBackground: #23241f;
26 |
27 | $backgroundColor: $coal;
28 |
29 | // Main text
30 | $mainFont: Ubuntu, 'sans-serif';
31 | $mainColor: #eee;
32 |
33 | // Headings
34 | $headingFont: Ubuntu, 'sans-serif';
35 | $headingTextShadow: 2px 2px 2px $coal;
36 |
37 | // h1 shadow, borrowed humbly from
38 | // (c) Default theme by Hakim El Hattab
39 | $heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15);
40 |
41 | // Links
42 | $linkColor: $blood;
43 | $linkColorHover: lighten( $linkColor, 20% );
44 |
45 | // Text selection
46 | $selectionBackgroundColor: $blood;
47 | $selectionColor: #fff;
48 |
49 |
50 | // Theme template ------------------------------
51 | @import "../template/theme";
52 | // ---------------------------------------------
53 |
54 | // some overrides after theme template import
55 |
56 | .reveal p {
57 | font-weight: 300;
58 | text-shadow: 1px 1px $coal;
59 | }
60 |
61 | .reveal h1,
62 | .reveal h2,
63 | .reveal h3,
64 | .reveal h4,
65 | .reveal h5,
66 | .reveal h6 {
67 | font-weight: 700;
68 | }
69 |
70 | .reveal p code {
71 | background-color: $codeBackground;
72 | display: inline-block;
73 | border-radius: 7px;
74 | }
75 |
76 | .reveal small code {
77 | vertical-align: baseline;
78 | }
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/league.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * League theme for reveal.js.
3 | *
4 | * This was the default theme pre-3.0.0.
5 | *
6 | * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
7 | */
8 |
9 |
10 | // Default mixins and settings -----------------
11 | @import "../template/mixins";
12 | @import "../template/settings";
13 | // ---------------------------------------------
14 |
15 |
16 |
17 | // Include theme-specific fonts
18 | @import url(../../lib/font/league-gothic/league-gothic.css);
19 | @import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
20 |
21 | // Override theme settings (see ../template/settings.scss)
22 | $headingTextShadow: 0px 0px 6px rgba(0,0,0,0.2);
23 | $heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15);
24 |
25 | // Background generator
26 | @mixin bodyBackground() {
27 | @include radial-gradient( rgba(28,30,32,1), rgba(85,90,95,1) );
28 | }
29 |
30 |
31 |
32 | // Theme template ------------------------------
33 | @import "../template/theme";
34 | // ---------------------------------------------
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/moon.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Solarized Dark theme for reveal.js.
3 | * Author: Achim Staebler
4 | */
5 |
6 |
7 | // Default mixins and settings -----------------
8 | @import "../template/mixins";
9 | @import "../template/settings";
10 | // ---------------------------------------------
11 |
12 |
13 |
14 | // Include theme-specific fonts
15 | @import url(../../lib/font/league-gothic/league-gothic.css);
16 | @import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
17 |
18 | /**
19 | * Solarized colors by Ethan Schoonover
20 | */
21 | html * {
22 | color-profile: sRGB;
23 | rendering-intent: auto;
24 | }
25 |
26 | // Solarized colors
27 | $base03: #002b36;
28 | $base02: #073642;
29 | $base01: #586e75;
30 | $base00: #657b83;
31 | $base0: #839496;
32 | $base1: #93a1a1;
33 | $base2: #eee8d5;
34 | $base3: #fdf6e3;
35 | $yellow: #b58900;
36 | $orange: #cb4b16;
37 | $red: #dc322f;
38 | $magenta: #d33682;
39 | $violet: #6c71c4;
40 | $blue: #268bd2;
41 | $cyan: #2aa198;
42 | $green: #859900;
43 |
44 | // Override theme settings (see ../template/settings.scss)
45 | $mainColor: $base1;
46 | $headingColor: $base2;
47 | $headingTextShadow: none;
48 | $backgroundColor: $base03;
49 | $linkColor: $blue;
50 | $linkColorHover: lighten( $linkColor, 20% );
51 | $selectionBackgroundColor: $magenta;
52 |
53 |
54 |
55 | // Theme template ------------------------------
56 | @import "../template/theme";
57 | // ---------------------------------------------
58 |
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/night.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Black theme for reveal.js.
3 | *
4 | * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
5 | */
6 |
7 |
8 | // Default mixins and settings -----------------
9 | @import "../template/mixins";
10 | @import "../template/settings";
11 | // ---------------------------------------------
12 |
13 |
14 | // Include theme-specific fonts
15 | @import url(https://fonts.googleapis.com/css?family=Montserrat:700);
16 | @import url(https://fonts.googleapis.com/css?family=Open+Sans:400,700,400italic,700italic);
17 |
18 |
19 | // Override theme settings (see ../template/settings.scss)
20 | $backgroundColor: #111;
21 |
22 | $mainFont: 'Open Sans', sans-serif;
23 | $linkColor: #e7ad52;
24 | $linkColorHover: lighten( $linkColor, 20% );
25 | $headingFont: 'Montserrat', Impact, sans-serif;
26 | $headingTextShadow: none;
27 | $headingLetterSpacing: -0.03em;
28 | $headingTextTransform: none;
29 | $selectionBackgroundColor: #e7ad52;
30 |
31 |
32 | // Theme template ------------------------------
33 | @import "../template/theme";
34 | // ---------------------------------------------
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/serif.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * A simple theme for reveal.js presentations, similar
3 | * to the default theme. The accent color is brown.
4 | *
5 | * This theme is Copyright (C) 2012-2013 Owen Versteeg, http://owenversteeg.com - it is MIT licensed.
6 | */
7 |
8 |
9 | // Default mixins and settings -----------------
10 | @import "../template/mixins";
11 | @import "../template/settings";
12 | // ---------------------------------------------
13 |
14 |
15 |
16 | // Override theme settings (see ../template/settings.scss)
17 | $mainFont: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
18 | $mainColor: #000;
19 | $headingFont: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
20 | $headingColor: #383D3D;
21 | $headingTextShadow: none;
22 | $headingTextTransform: none;
23 | $backgroundColor: #F0F1EB;
24 | $linkColor: #51483D;
25 | $linkColorHover: lighten( $linkColor, 20% );
26 | $selectionBackgroundColor: #26351C;
27 |
28 | .reveal a {
29 | line-height: 1.3em;
30 | }
31 |
32 |
33 | // Theme template ------------------------------
34 | @import "../template/theme";
35 | // ---------------------------------------------
36 |
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/simple.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * A simple theme for reveal.js presentations, similar
3 | * to the default theme. The accent color is darkblue.
4 | *
5 | * This theme is Copyright (C) 2012 Owen Versteeg, https://github.com/StereotypicalApps. It is MIT licensed.
6 | * reveal.js is Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
7 | */
8 |
9 |
10 | // Default mixins and settings -----------------
11 | @import "../template/mixins";
12 | @import "../template/settings";
13 | // ---------------------------------------------
14 |
15 |
16 |
17 | // Include theme-specific fonts
18 | @import url(https://fonts.googleapis.com/css?family=News+Cycle:400,700);
19 | @import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
20 |
21 |
22 | // Override theme settings (see ../template/settings.scss)
23 | $mainFont: 'Lato', sans-serif;
24 | $mainColor: #000;
25 | $headingFont: 'News Cycle', Impact, sans-serif;
26 | $headingColor: #000;
27 | $headingTextShadow: none;
28 | $headingTextTransform: none;
29 | $backgroundColor: #fff;
30 | $linkColor: #00008B;
31 | $linkColorHover: lighten( $linkColor, 20% );
32 | $selectionBackgroundColor: rgba(0, 0, 0, 0.99);
33 |
34 | section.has-dark-background {
35 | &, h1, h2, h3, h4, h5, h6 {
36 | color: #fff;
37 | }
38 | }
39 |
40 |
41 | // Theme template ------------------------------
42 | @import "../template/theme";
43 | // ---------------------------------------------
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/sky.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Sky theme for reveal.js.
3 | *
4 | * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se
5 | */
6 |
7 |
8 | // Default mixins and settings -----------------
9 | @import "../template/mixins";
10 | @import "../template/settings";
11 | // ---------------------------------------------
12 |
13 |
14 |
15 | // Include theme-specific fonts
16 | @import url(https://fonts.googleapis.com/css?family=Quicksand:400,700,400italic,700italic);
17 | @import url(https://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700);
18 |
19 |
20 | // Override theme settings (see ../template/settings.scss)
21 | $mainFont: 'Open Sans', sans-serif;
22 | $mainColor: #333;
23 | $headingFont: 'Quicksand', sans-serif;
24 | $headingColor: #333;
25 | $headingLetterSpacing: -0.08em;
26 | $headingTextShadow: none;
27 | $backgroundColor: #f7fbfc;
28 | $linkColor: #3b759e;
29 | $linkColorHover: lighten( $linkColor, 20% );
30 | $selectionBackgroundColor: #134674;
31 |
32 | // Fix links so they are not cut off
33 | .reveal a {
34 | line-height: 1.3em;
35 | }
36 |
37 | // Background generator
38 | @mixin bodyBackground() {
39 | @include radial-gradient( #add9e4, #f7fbfc );
40 | }
41 |
42 |
43 |
44 | // Theme template ------------------------------
45 | @import "../template/theme";
46 | // ---------------------------------------------
47 |
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/solarized.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * Solarized Light theme for reveal.js.
3 | * Author: Achim Staebler
4 | */
5 |
6 |
7 | // Default mixins and settings -----------------
8 | @import "../template/mixins";
9 | @import "../template/settings";
10 | // ---------------------------------------------
11 |
12 |
13 |
14 | // Include theme-specific fonts
15 | @import url(../../lib/font/league-gothic/league-gothic.css);
16 | @import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic);
17 |
18 |
19 | /**
20 | * Solarized colors by Ethan Schoonover
21 | */
22 | html * {
23 | color-profile: sRGB;
24 | rendering-intent: auto;
25 | }
26 |
27 | // Solarized colors
28 | $base03: #002b36;
29 | $base02: #073642;
30 | $base01: #586e75;
31 | $base00: #657b83;
32 | $base0: #839496;
33 | $base1: #93a1a1;
34 | $base2: #eee8d5;
35 | $base3: #fdf6e3;
36 | $yellow: #b58900;
37 | $orange: #cb4b16;
38 | $red: #dc322f;
39 | $magenta: #d33682;
40 | $violet: #6c71c4;
41 | $blue: #268bd2;
42 | $cyan: #2aa198;
43 | $green: #859900;
44 |
45 | // Override theme settings (see ../template/settings.scss)
46 | $mainColor: $base00;
47 | $headingColor: $base01;
48 | $headingTextShadow: none;
49 | $backgroundColor: $base3;
50 | $linkColor: $blue;
51 | $linkColorHover: lighten( $linkColor, 20% );
52 | $selectionBackgroundColor: $magenta;
53 |
54 | // Background generator
55 | // @mixin bodyBackground() {
56 | // @include radial-gradient( rgba($base3,1), rgba(lighten($base3, 20%),1) );
57 | // }
58 |
59 |
60 |
61 | // Theme template ------------------------------
62 | @import "../template/theme";
63 | // ---------------------------------------------
64 |
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/source/white.scss:
--------------------------------------------------------------------------------
1 | /**
2 | * White theme for reveal.js. This is the opposite of the 'black' theme.
3 | *
4 | * By Hakim El Hattab, http://hakim.se
5 | */
6 |
7 |
8 | // Default mixins and settings -----------------
9 | @import "../template/mixins";
10 | @import "../template/settings";
11 | // ---------------------------------------------
12 |
13 |
14 | // Include theme-specific fonts
15 | @import url(../../lib/font/source-sans-pro/source-sans-pro.css);
16 |
17 |
18 | // Override theme settings (see ../template/settings.scss)
19 | $backgroundColor: #fff;
20 |
21 | $mainColor: #222;
22 | $headingColor: #222;
23 |
24 | $mainFontSize: 42px;
25 | $mainFont: 'Source Sans Pro', Helvetica, sans-serif;
26 | $headingFont: 'Source Sans Pro', Helvetica, sans-serif;
27 | $headingTextShadow: none;
28 | $headingLetterSpacing: normal;
29 | $headingTextTransform: uppercase;
30 | $headingFontWeight: 600;
31 | $linkColor: #2a76dd;
32 | $linkColorHover: lighten( $linkColor, 15% );
33 | $selectionBackgroundColor: lighten( $linkColor, 25% );
34 |
35 | $heading1Size: 2.5em;
36 | $heading2Size: 1.6em;
37 | $heading3Size: 1.3em;
38 | $heading4Size: 1.0em;
39 |
40 | section.has-dark-background {
41 | &, h1, h2, h3, h4, h5, h6 {
42 | color: #fff;
43 | }
44 | }
45 |
46 |
47 | // Theme template ------------------------------
48 | @import "../template/theme";
49 | // ---------------------------------------------
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/template/mixins.scss:
--------------------------------------------------------------------------------
1 | @mixin vertical-gradient( $top, $bottom ) {
2 | background: $top;
3 | background: -moz-linear-gradient( top, $top 0%, $bottom 100% );
4 | background: -webkit-gradient( linear, left top, left bottom, color-stop(0%,$top), color-stop(100%,$bottom) );
5 | background: -webkit-linear-gradient( top, $top 0%, $bottom 100% );
6 | background: -o-linear-gradient( top, $top 0%, $bottom 100% );
7 | background: -ms-linear-gradient( top, $top 0%, $bottom 100% );
8 | background: linear-gradient( top, $top 0%, $bottom 100% );
9 | }
10 |
11 | @mixin horizontal-gradient( $top, $bottom ) {
12 | background: $top;
13 | background: -moz-linear-gradient( left, $top 0%, $bottom 100% );
14 | background: -webkit-gradient( linear, left top, right top, color-stop(0%,$top), color-stop(100%,$bottom) );
15 | background: -webkit-linear-gradient( left, $top 0%, $bottom 100% );
16 | background: -o-linear-gradient( left, $top 0%, $bottom 100% );
17 | background: -ms-linear-gradient( left, $top 0%, $bottom 100% );
18 | background: linear-gradient( left, $top 0%, $bottom 100% );
19 | }
20 |
21 | @mixin radial-gradient( $outer, $inner, $type: circle ) {
22 | background: $outer;
23 | background: -moz-radial-gradient( center, $type cover, $inner 0%, $outer 100% );
24 | background: -webkit-gradient( radial, center center, 0px, center center, 100%, color-stop(0%,$inner), color-stop(100%,$outer) );
25 | background: -webkit-radial-gradient( center, $type cover, $inner 0%, $outer 100% );
26 | background: -o-radial-gradient( center, $type cover, $inner 0%, $outer 100% );
27 | background: -ms-radial-gradient( center, $type cover, $inner 0%, $outer 100% );
28 | background: radial-gradient( center, $type cover, $inner 0%, $outer 100% );
29 | }
--------------------------------------------------------------------------------
/presentation/reveal.js/css/theme/template/settings.scss:
--------------------------------------------------------------------------------
1 | // Base settings for all themes that can optionally be
2 | // overridden by the super-theme
3 |
4 | // Background of the presentation
5 | $backgroundColor: #2b2b2b;
6 |
7 | // Primary/body text
8 | $mainFont: 'Lato', sans-serif;
9 | $mainFontSize: 40px;
10 | $mainColor: #eee;
11 |
12 | // Vertical spacing between blocks of text
13 | $blockMargin: 20px;
14 |
15 | // Headings
16 | $headingMargin: 0 0 $blockMargin 0;
17 | $headingFont: 'League Gothic', Impact, sans-serif;
18 | $headingColor: #eee;
19 | $headingLineHeight: 1.2;
20 | $headingLetterSpacing: normal;
21 | $headingTextTransform: uppercase;
22 | $headingTextShadow: none;
23 | $headingFontWeight: normal;
24 | $heading1TextShadow: $headingTextShadow;
25 |
26 | $heading1Size: 3.77em;
27 | $heading2Size: 2.11em;
28 | $heading3Size: 1.55em;
29 | $heading4Size: 1.00em;
30 |
31 | // Links and actions
32 | $linkColor: #13DAEC;
33 | $linkColorHover: lighten( $linkColor, 20% );
34 |
35 | // Text selection
36 | $selectionBackgroundColor: #FF5E99;
37 | $selectionColor: #fff;
38 |
39 | // Generates the presentation background, can be overridden
40 | // to return a background image or gradient
41 | @mixin bodyBackground() {
42 | background: $backgroundColor;
43 | }
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/css/zenburn.css:
--------------------------------------------------------------------------------
1 | /*
2 |
3 | Zenburn style from voldmar.ru (c) Vladimir Epifanov
4 | based on dark.css by Ivan Sagalaev
5 |
6 | */
7 |
8 | .hljs {
9 | display: block;
10 | overflow-x: auto;
11 | padding: 0.5em;
12 | background: #3f3f3f;
13 | color: #dcdcdc;
14 | }
15 |
16 | .hljs-keyword,
17 | .hljs-selector-tag,
18 | .hljs-tag {
19 | color: #e3ceab;
20 | }
21 |
22 | .hljs-template-tag {
23 | color: #dcdcdc;
24 | }
25 |
26 | .hljs-number {
27 | color: #8cd0d3;
28 | }
29 |
30 | .hljs-variable,
31 | .hljs-template-variable,
32 | .hljs-attribute {
33 | color: #efdcbc;
34 | }
35 |
36 | .hljs-literal {
37 | color: #efefaf;
38 | }
39 |
40 | .hljs-subst {
41 | color: #8f8f8f;
42 | }
43 |
44 | .hljs-title,
45 | .hljs-name,
46 | .hljs-selector-id,
47 | .hljs-selector-class,
48 | .hljs-section,
49 | .hljs-type {
50 | color: #efef8f;
51 | }
52 |
53 | .hljs-symbol,
54 | .hljs-bullet,
55 | .hljs-link {
56 | color: #dca3a3;
57 | }
58 |
59 | .hljs-deletion,
60 | .hljs-string,
61 | .hljs-built_in,
62 | .hljs-builtin-name {
63 | color: #cc9393;
64 | }
65 |
66 | .hljs-addition,
67 | .hljs-comment,
68 | .hljs-quote,
69 | .hljs-meta {
70 | color: #7f9f7f;
71 | }
72 |
73 |
74 | .hljs-emphasis {
75 | font-style: italic;
76 | }
77 |
78 | .hljs-strong {
79 | font-weight: bold;
80 | }
81 |
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/league-gothic/LICENSE:
--------------------------------------------------------------------------------
1 | SIL Open Font License (OFL)
2 | http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=OFL
3 |
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/league-gothic/league-gothic.css:
--------------------------------------------------------------------------------
1 | @font-face {
2 | font-family: 'League Gothic';
3 | src: url('league-gothic.eot');
4 | src: url('league-gothic.eot?#iefix') format('embedded-opentype'),
5 | url('league-gothic.woff') format('woff'),
6 | url('league-gothic.ttf') format('truetype');
7 |
8 | font-weight: normal;
9 | font-style: normal;
10 | }
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/league-gothic/league-gothic.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/league-gothic/league-gothic.eot
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/league-gothic/league-gothic.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/league-gothic/league-gothic.ttf
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/league-gothic/league-gothic.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/league-gothic/league-gothic.woff
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-italic.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-italic.eot
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-italic.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-italic.ttf
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-italic.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-italic.woff
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-regular.eot
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-regular.ttf
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-regular.woff
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibold.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibold.eot
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibold.ttf
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibold.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibold.woff
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibolditalic.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibolditalic.eot
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibolditalic.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibolditalic.ttf
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibolditalic.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro-semibolditalic.woff
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/font/source-sans-pro/source-sans-pro.css:
--------------------------------------------------------------------------------
1 | @font-face {
2 | font-family: 'Source Sans Pro';
3 | src: url('source-sans-pro-regular.eot');
4 | src: url('source-sans-pro-regular.eot?#iefix') format('embedded-opentype'),
5 | url('source-sans-pro-regular.woff') format('woff'),
6 | url('source-sans-pro-regular.ttf') format('truetype');
7 | font-weight: normal;
8 | font-style: normal;
9 | }
10 |
11 | @font-face {
12 | font-family: 'Source Sans Pro';
13 | src: url('source-sans-pro-italic.eot');
14 | src: url('source-sans-pro-italic.eot?#iefix') format('embedded-opentype'),
15 | url('source-sans-pro-italic.woff') format('woff'),
16 | url('source-sans-pro-italic.ttf') format('truetype');
17 | font-weight: normal;
18 | font-style: italic;
19 | }
20 |
21 | @font-face {
22 | font-family: 'Source Sans Pro';
23 | src: url('source-sans-pro-semibold.eot');
24 | src: url('source-sans-pro-semibold.eot?#iefix') format('embedded-opentype'),
25 | url('source-sans-pro-semibold.woff') format('woff'),
26 | url('source-sans-pro-semibold.ttf') format('truetype');
27 | font-weight: 600;
28 | font-style: normal;
29 | }
30 |
31 | @font-face {
32 | font-family: 'Source Sans Pro';
33 | src: url('source-sans-pro-semibolditalic.eot');
34 | src: url('source-sans-pro-semibolditalic.eot?#iefix') format('embedded-opentype'),
35 | url('source-sans-pro-semibolditalic.woff') format('woff'),
36 | url('source-sans-pro-semibolditalic.ttf') format('truetype');
37 | font-weight: 600;
38 | font-style: italic;
39 | }
--------------------------------------------------------------------------------
/presentation/reveal.js/lib/js/classList.js:
--------------------------------------------------------------------------------
1 | /*! @source http://purl.eligrey.com/github/classList.js/blob/master/classList.js*/
2 | if(typeof document!=="undefined"&&!("classList" in document.createElement("a"))){(function(j){var a="classList",f="prototype",m=(j.HTMLElement||j.Element)[f],b=Object,k=String[f].trim||function(){return this.replace(/^\s+|\s+$/g,"")},c=Array[f].indexOf||function(q){var p=0,o=this.length;for(;p
--------------------------------------------------------------------------------
/presentation/reveal.js/plugin/multiplex/index.js:
--------------------------------------------------------------------------------
37 | body{font-family: sans-serif;}reveal.js multiplex server.
Generate token');
38 | res.end();
39 | });
40 | stream.on('readable', function() {
41 | stream.pipe(res);
42 | });
43 | });
44 |
45 | app.get("/token", function(req,res) {
46 | var ts = new Date().getTime();
47 | var rand = Math.floor(Math.random()*9999999);
48 | var secret = ts.toString() + rand.toString();
49 | res.send({secret: secret, socketId: createHash(secret)});
50 | });
51 |
52 | var createHash = function(secret) {
53 | var cipher = crypto.createCipher('blowfish', secret);
54 | return(cipher.final('hex'));
55 | };
56 |
57 | // Actually listen
58 | server.listen( opts.port || null );
59 |
60 | var brown = '\033[33m',
61 | green = '\033[32m',
62 | reset = '\033[0m';
63 |
64 | console.log( brown + "reveal.js:" + reset + " Multiplex running on port " + green + opts.port + reset );
--------------------------------------------------------------------------------
/presentation/reveal.js/plugin/multiplex/master.js:
--------------------------------------------------------------------------------
1 | (function() {
2 |
3 | // Don't emit events from inside of notes windows
4 | if ( window.location.search.match( /receiver/gi ) ) { return; }
5 |
6 | var multiplex = Reveal.getConfig().multiplex;
7 |
8 | var socket = io.connect( multiplex.url );
9 |
10 | function post() {
11 |
12 | var messageData = {
13 | state: Reveal.getState(),
14 | secret: multiplex.secret,
15 | socketId: multiplex.id
16 | };
17 |
18 | socket.emit( 'multiplex-statechanged', messageData );
19 |
20 | };
21 |
22 | // post once the page is loaded, so the client follows also on "open URL".
23 | window.addEventListener( 'load', post );
24 |
25 | // Monitor events that trigger a change in state
26 | Reveal.addEventListener( 'slidechanged', post );
27 | Reveal.addEventListener( 'fragmentshown', post );
28 | Reveal.addEventListener( 'fragmenthidden', post );
29 | Reveal.addEventListener( 'overviewhidden', post );
30 | Reveal.addEventListener( 'overviewshown', post );
31 | Reveal.addEventListener( 'paused', post );
32 | Reveal.addEventListener( 'resumed', post );
33 |
34 | }());
35 |
--------------------------------------------------------------------------------
/presentation/reveal.js/plugin/multiplex/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "reveal-js-multiplex",
3 | "version": "1.0.0",
4 | "description": "reveal.js multiplex server",
5 | "homepage": "http://revealjs.com",
6 | "scripts": {
7 | "start": "node index.js"
8 | },
9 | "engines": {
10 | "node": "~4.1.1"
11 | },
12 | "dependencies": {
13 | "express": "~4.13.3",
14 | "grunt-cli": "~0.1.13",
15 | "mustache": "~2.2.1",
16 | "socket.io": "~1.3.7"
17 | },
18 | "license": "MIT"
19 | }
20 |
--------------------------------------------------------------------------------
/presentation/reveal.js/plugin/notes-server/client.js:
--------------------------------------------------------------------------------
1 | (function() {
2 |
3 | // don't emit events from inside the previews themselves
4 | if( window.location.search.match( /receiver/gi ) ) { return; }
5 |
6 | var socket = io.connect( window.location.origin ),
7 | socketId = Math.random().toString().slice( 2 );
8 |
9 | console.log( 'View slide notes at ' + window.location.origin + '/notes/' + socketId );
10 |
11 | window.open( window.location.origin + '/notes/' + socketId, 'notes-' + socketId );
12 |
13 | /**
14 | * Posts the current slide data to the notes window
15 | */
16 | function post() {
17 |
18 | var slideElement = Reveal.getCurrentSlide(),
19 | notesElement = slideElement.querySelector( 'aside.notes' );
20 |
21 | var messageData = {
22 | notes: '',
23 | markdown: false,
24 | socketId: socketId,
25 | state: Reveal.getState()
26 | };
27 |
28 | // Look for notes defined in a slide attribute
29 | if( slideElement.hasAttribute( 'data-notes' ) ) {
30 | messageData.notes = slideElement.getAttribute( 'data-notes' );
31 | }
32 |
33 | // Look for notes defined in an aside element
34 | if( notesElement ) {
35 | messageData.notes = notesElement.innerHTML;
36 | messageData.markdown = typeof notesElement.getAttribute( 'data-markdown' ) === 'string';
37 | }
38 |
39 | socket.emit( 'statechanged', messageData );
40 |
41 | }
42 |
43 | // When a new notes window connects, post our current state
44 | socket.on( 'new-subscriber', function( data ) {
45 | post();
46 | } );
47 |
48 | // When the state changes from inside of the speaker view
49 | socket.on( 'statechanged-speaker', function( data ) {
50 | Reveal.setState( data.state );
51 | } );
52 |
53 | // Monitor events that trigger a change in state
54 | Reveal.addEventListener( 'slidechanged', post );
55 | Reveal.addEventListener( 'fragmentshown', post );
56 | Reveal.addEventListener( 'fragmenthidden', post );
57 | Reveal.addEventListener( 'overviewhidden', post );
58 | Reveal.addEventListener( 'overviewshown', post );
59 | Reveal.addEventListener( 'paused', post );
60 | Reveal.addEventListener( 'resumed', post );
61 |
62 | // Post the initial state
63 | post();
64 |
65 | }());
66 |
--------------------------------------------------------------------------------
/presentation/reveal.js/plugin/notes-server/index.js:
--------------------------------------------------------------------------------
1 | var http = require('http');
2 | var express = require('express');
3 | var fs = require('fs');
4 | var io = require('socket.io');
5 | var Mustache = require('mustache');
6 |
7 | var app = express();
8 | var staticDir = express.static;
9 | var server = http.createServer(app);
10 |
11 | io = io(server);
12 |
13 | var opts = {
14 | port : 1947,
15 | baseDir : __dirname + '/../../'
16 | };
17 |
18 | io.on( 'connection', function( socket ) {
19 |
20 | socket.on( 'new-subscriber', function( data ) {
21 | socket.broadcast.emit( 'new-subscriber', data );
22 | });
23 |
24 | socket.on( 'statechanged', function( data ) {
25 | delete data.state.overview;
26 | socket.broadcast.emit( 'statechanged', data );
27 | });
28 |
29 | socket.on( 'statechanged-speaker', function( data ) {
30 | delete data.state.overview;
31 | socket.broadcast.emit( 'statechanged-speaker', data );
32 | });
33 |
34 | });
35 |
36 | [ 'css', 'js', 'images', 'plugin', 'lib' ].forEach( function( dir ) {
37 | app.use( '/' + dir, staticDir( opts.baseDir + dir ) );
38 | });
39 |
40 | app.get('/', function( req, res ) {
41 |
42 | res.writeHead( 200, { 'Content-Type': 'text/html' } );
43 | fs.createReadStream( opts.baseDir + '/index.html' ).pipe( res );
44 |
45 | });
46 |
47 | app.get( '/notes/:socketId', function( req, res ) {
48 |
49 | fs.readFile( opts.baseDir + 'plugin/notes-server/notes.html', function( err, data ) {
50 | res.send( Mustache.to_html( data.toString(), {
51 | socketId : req.params.socketId
52 | }));
53 | });
54 |
55 | });
56 |
57 | // Actually listen
58 | server.listen( opts.port || null );
59 |
60 | var brown = '\033[33m',
61 | green = '\033[32m',
62 | reset = '\033[0m';
63 |
64 | var slidesLocation = 'http://localhost' + ( opts.port ? ( ':' + opts.port ) : '' );
65 |
66 | console.log( brown + 'reveal.js - Speaker Notes' + reset );
67 | console.log( '1. Open the slides at ' + green + slidesLocation + reset );
68 | console.log( '2. Click on the link in your JS console to go to the notes page' );
69 | console.log( '3. Advance through your slides and your notes will advance automatically' );
70 |
--------------------------------------------------------------------------------
/presentation/reveal.js/plugin/print-pdf/print-pdf.js:
--------------------------------------------------------------------------------
1 | /**
2 | * phantomjs script for printing presentations to PDF.
3 | *
4 | * Example:
5 | * phantomjs print-pdf.js "http://revealjs.com?print-pdf" reveal-demo.pdf
6 | *
7 | * @author Manuel Bieh (https://github.com/manuelbieh)
8 | * @author Hakim El Hattab (https://github.com/hakimel)
9 | * @author Manuel Riezebosch (https://github.com/riezebosch)
10 | */
11 |
12 | // html2pdf.js
13 | var system = require( 'system' );
14 |
15 | var probePage = new WebPage();
16 | var printPage = new WebPage();
17 |
18 | var inputFile = system.args[1] || 'index.html?print-pdf';
19 | var outputFile = system.args[2] || 'slides.pdf';
20 |
21 | if( outputFile.match( /\.pdf$/gi ) === null ) {
22 | outputFile += '.pdf';
23 | }
24 |
25 | console.log( 'Export PDF: Reading reveal.js config [1/4]' );
26 |
27 | probePage.open( inputFile, function( status ) {
28 |
29 | console.log( 'Export PDF: Preparing print layout [2/4]' );
30 |
31 | var config = probePage.evaluate( function() {
32 | return Reveal.getConfig();
33 | } );
34 |
35 | if( config ) {
36 |
37 | printPage.paperSize = {
38 | width: Math.floor( config.width * ( 1 + config.margin ) ),
39 | height: Math.floor( config.height * ( 1 + config.margin ) ),
40 | border: 0
41 | };
42 |
43 | printPage.open( inputFile, function( status ) {
44 | console.log( 'Export PDF: Preparing pdf [3/4]')
45 | printPage.evaluate( function() {
46 | Reveal.isReady() ? window.callPhantom() : Reveal.addEventListener( 'pdf-ready', window.callPhantom );
47 | } );
48 | } );
49 |
50 | printPage.onCallback = function( data ) {
51 | // For some reason we need to "jump the queue" for syntax highlighting to work.
52 | // See: http://stackoverflow.com/a/3580132/129269
53 | setTimeout( function() {
54 | console.log( 'Export PDF: Writing file [4/4]' );
55 | printPage.render( outputFile );
56 | console.log( 'Export PDF: Finished successfully!' );
57 | phantom.exit();
58 | }, 0 );
59 | };
60 | }
61 | else {
62 |
63 | console.log( 'Export PDF: Unable to read reveal.js config. Make sure the input address points to a reveal.js page.' );
64 | phantom.exit( 1 );
65 |
66 | }
67 | } );
68 |
--------------------------------------------------------------------------------
/presentation/style.css:
--------------------------------------------------------------------------------
1 |
2 |
3 | .intro .slide-background-content {
4 | background-color: rgba(255, 255, 255, 1.0);
5 | background-position:center;
6 | background-size: 100%;
7 | background-repeat: no-repeat;
8 | }
9 |
10 | .questions .slide-background {
11 | background-color: rgba(255, 255, 255, 1.0);
12 | background-position:center;
13 | background-size: 100%;
14 | background-repeat: no-repeat;
15 | }
16 |
17 | #questions h1 {
18 | color: rgba(0, 0, 0, 1.0);
19 | }
20 |
21 |
22 |
23 | .reveal section img {
24 | border:none;
25 | box-shadow:none;
26 | background: rgba(255, 255, 255, 1.0);
27 | margin: 0px;
28 | padding: 10px;
29 | }
30 |
31 | #soundsensing h2 {
32 | display: none;
33 | }
34 |
35 | #demo-video h2 {
36 | display: none;
37 | }
38 |
39 | #architecture h2 {
40 | display: none;
41 | }
42 |
43 | #pipeline h2 {
44 | display: none;
45 | }
46 |
47 | #confusion {
48 | padding: 0px;
49 | }
50 |
51 | #confusion p {
52 | margin: 0px;
53 | }
54 |
55 | #confusion h2 {
56 | display: none;
57 | }
58 |
59 |
--------------------------------------------------------------------------------
/report.py:
--------------------------------------------------------------------------------
1 |
2 | from microesc import report
3 | report.main()
4 |
--------------------------------------------------------------------------------
/report/163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav
--------------------------------------------------------------------------------
/report/Makefile:
--------------------------------------------------------------------------------
1 |
2 | all: report
3 |
4 | report: report.pdf
5 |
6 | pyincludes/%.tex: pyincludes/%.py
7 | PYTHONPATH=../ python3 $< $@
8 |
9 | pyplots/%.png: pyplots/%.py
10 | PYTHONPATH=../ python3 $< $@
11 |
12 | plots/%.tex: plot.py
13 | PYTHONPATH=../ python3 plot.py $@
14 |
15 | plots/%.png: plot.py
16 | PYTHONPATH=../ python3 plot.py $@
17 |
18 |
19 | includes: pyincludes/urbansound8k-classes.tex \
20 | pyincludes/microcontrollers.tex \
21 | pyincludes/experiment-settings.tex \
22 | pyincludes/models.tex \
23 | pyincludes/results.tex \
24 | pyplots/logloss.png \
25 | pyplots/filterbanks.png \
26 | pyplots/dataaugmentations.png \
27 | plots/urbansound8k-examples.png \
28 | plots/urbansound8k-existing-models-logmel.tex \
29 | plots/urbansound8k-existing-models-logmel.png
30 |
31 |
32 | report.pdf: report.md includes
33 | pandoc --include-before-body=cover.latex --include-before-body=abstract.latex --include-after-body=end.latex --bibliography=references.bib -V papersize:a4 -V geometry:margin=1.0in -V fontsize=12pt -H preamble.tex --csl ieee.csl --toc -Vlof -Vlot --pdf-engine-opt=-shell-escape --variable subparagraph --number-sections --lua-filter=short-captions.lua -s report.md -o report.pdf
34 |
35 | status.pdf: status.md
36 | pandoc -t beamer -s status.md -o status.pdf --slide-level=2 --mathml
37 |
--------------------------------------------------------------------------------
/report/abstract.latex:
--------------------------------------------------------------------------------
1 |
2 | \newpage
3 | \thispagestyle{empty}
4 | \mbox{}
5 |
6 | \begin{abstract}
7 |
8 | Noise is a growing problem in urban areas,
9 | and according to the WHO it is the second-largest environmental cause of health problems in Europe.
10 | Noise monitoring using Wireless Sensor Networks is
11 | being applied in order to understand and help mitigate these noise problems.
12 | It is desirable that these sensor systems, in addition to logging the sound level,
13 | can indicate what the likely sound source is.
14 | However, transmitting audio to a cloud system for classification is
15 | energy-intensive and may cause privacy issues.
16 | It is also critical for widespread adoption and dense sensor coverage that
17 | individual sensor nodes are low-cost.
18 | Therefore we propose to perform the noise classification on the sensor node,
19 | using a low-cost microcontroller.
20 |
21 | Several Convolutional Neural Networks were designed for the
22 | STM32L476 low-power microcontroller using the Keras deep-learning framework,
23 | and deployed using the vendor-provided X-CUBE-AI inference engine.
24 | The resource budget for the models was set to a maximum of 50\% utilization of CPU, RAM, and FLASH.
25 | Ten model variations were evaluated on the Environmental Sound Classification task
26 | using the standard Urbansound8k dataset.
27 |
28 | The best models used Depthwise-Separable convolutions with striding for downsampling,
29 | and were able to reach 70.9\% mean 10-fold accuracy while consuming only 20\% CPU.
30 | To our knowledge, this is the highest reported performance on Urbansound8k using a microcontroller.
31 | One of the models was also tested on a microcontroller development device,
32 | demonstrating the classification of environmental sounds in real-time.
33 |
34 | These results indicate that it is computationally feasible to classify environmental sound
35 | on low-power microcontrollers.
36 | Further development should make it possible to create wireless sensor networks
37 | for noise monitoring with on-edge noise source classification.
38 |
39 | \end{abstract}
40 |
41 | \thispagestyle{empty}
42 |
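
For illustration, a minimal Keras sketch of the kind of strided depthwise-separable network the abstract describes. This is not the project's model definition (those live in the microesc.models module); the exact layer choices below are assumptions, shown only to make the architecture concrete.

    import keras
    from keras.layers import (Input, SeparableConv2D, BatchNormalization,
                              Activation, GlobalAveragePooling2D, Dense)

    def stride_ds_sketch(bands=60, frames=31, filters=24, n_stages=3, n_classes=10):
        # Log-mel spectrogram patch in, class probabilities out
        x = inp = Input(shape=(bands, frames, 1))
        for _ in range(n_stages):
            # Depthwise-separable 5x5 convolution; stride 2x2 replaces
            # max-pooling for downsampling, which is what cuts the MACC count
            x = SeparableConv2D(filters, (5, 5), strides=(2, 2), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
        x = GlobalAveragePooling2D()(x)
        out = Dense(n_classes, activation='softmax')(x)
        return keras.Model(inp, out)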
--------------------------------------------------------------------------------
/report/cover.latex:
--------------------------------------------------------------------------------
1 |
2 | \includepdf[pages={1}]{cover.pdf}
3 |
--------------------------------------------------------------------------------
/report/cover.odt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/cover.odt
--------------------------------------------------------------------------------
/report/cover.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/cover.pdf
--------------------------------------------------------------------------------
/report/end.latex:
--------------------------------------------------------------------------------
1 |
2 | \includepdf[pages={1}]{end.pdf}
3 |
--------------------------------------------------------------------------------
/report/end.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/end.pdf
--------------------------------------------------------------------------------
/report/img/CMSIS-NN-functions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/CMSIS-NN-functions.png
--------------------------------------------------------------------------------
/report/img/SONYC-CPS.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/SONYC-CPS.png
--------------------------------------------------------------------------------
/report/img/ST-Orlando-SoC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/ST-Orlando-SoC.png
--------------------------------------------------------------------------------
/report/img/STM32F103VGT6-LD.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/STM32F103VGT6-LD.jpg
--------------------------------------------------------------------------------
/report/img/activation-functions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/activation-functions.png
--------------------------------------------------------------------------------
/report/img/analysis-windows.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/analysis-windows.png
--------------------------------------------------------------------------------
/report/img/artificial-neuron.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/artificial-neuron.png
--------------------------------------------------------------------------------
/report/img/audio-aquisition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/audio-aquisition.png
--------------------------------------------------------------------------------
/report/img/classification-pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/classification-pipeline.png
--------------------------------------------------------------------------------
/report/img/conv-block-effnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-block-effnet.png
--------------------------------------------------------------------------------
/report/img/conv-block-mobilenet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-block-mobilenet.png
--------------------------------------------------------------------------------
/report/img/conv-block-shufflenet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-block-shufflenet.png
--------------------------------------------------------------------------------
/report/img/conv-blocks-imagenets.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-blocks-imagenets.png
--------------------------------------------------------------------------------
/report/img/conv-depthwise-separable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-depthwise-separable.png
--------------------------------------------------------------------------------
/report/img/conv-grouped-1x1-g3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-grouped-1x1-g3.png
--------------------------------------------------------------------------------
/report/img/conv-grouped-3x3-g3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-grouped-3x3-g3.png
--------------------------------------------------------------------------------
/report/img/conv-standard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/conv-standard.png
--------------------------------------------------------------------------------
/report/img/convolution-2d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/convolution-2d.png
--------------------------------------------------------------------------------
/report/img/crossvalidation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/crossvalidation.png
--------------------------------------------------------------------------------
/report/img/demo-tightcrop.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/demo-tightcrop.jpg
--------------------------------------------------------------------------------
/report/img/depthwise-separable-convolution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/depthwise-separable-convolution.png
--------------------------------------------------------------------------------
/report/img/envnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/envnet.png
--------------------------------------------------------------------------------
/report/img/frontpage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/frontpage.png
--------------------------------------------------------------------------------
/report/img/lenet5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/lenet5.png
--------------------------------------------------------------------------------
/report/img/maxpooling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/maxpooling.png
--------------------------------------------------------------------------------
/report/img/multilayer-perceptron.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/multilayer-perceptron.png
--------------------------------------------------------------------------------
/report/img/noiseseverity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/noiseseverity.png
--------------------------------------------------------------------------------
/report/img/piczak-cnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/piczak-cnn.png
--------------------------------------------------------------------------------
/report/img/results.csv:
--------------------------------------------------------------------------------
1 | experiment,result_path,maccs_frame,flash_usage,ram_usage_max,ram_usage_min,test_acc_mean,foreground_test_acc_mean,background_test_acc_mean,model,conv_block,n_stages,conv_size,downsample_size,filters,modelcheck,nickname,classifications_per_second
2 | 0,./data/results/20190501-0223/0.confusion.npz,10185806.0,415100.0,36290.0,36290.0,0.7311827956989249,0.8427230046948355,0.4956629491945476,sbcnn,conv,3,5x5,3x2,24,skip,Baseline,2.7777777777777777
3 | 1,./data/results/20190501-0223/1.confusion.npz,2980798.0,381150.0,56720.0,56720.0,0.7185716182131953,0.8210093896713615,0.5022717885171417,strided,conv,3,5x5,2x2,22,,Stride-5x5,2.7777777777777777
4 | 10,./data/results/20190501-0223/10.confusion.npz,468649.0,128750.0,48750.0,48750.0,0.6713128899508827,0.7546948356807511,0.49524989673688563,strided,effnet,3,5x5,2x2,22,,Stride-Effnet-5x5,2.7777777777777777
5 | 2,./data/results/20190501-0223/2.confusion.npz,477236.0,184640.0,56250.0,56250.0,0.7254745785211735,0.8133802816901408,0.5398595621643948,strided,depthwise_separable,3,5x5,2x2,24,,Stride-DS-5x5,2.7777777777777777
6 | 3,./data/results/20190501-0223/3.confusion.npz,318497.0,97650.0,56250.0,56250.0,0.7011814682065578,0.7938184663536776,0.5055762081784386,strided,depthwise_separable,4,3x3,2x2,24,,Stride-DS-3x3,2.7777777777777777
7 | 4,./data/results/20190501-0223/4.confusion.npz,445688.0,81940.0,48750.0,48750.0,0.685517058276915,0.7767996870109547,0.49277158199091287,strided,bottleneck_ds,3,5x5,2x2,22,,Stride-BN-DS-5x5,2.7777777777777777
8 | 5,./data/results/20190501-0223/5.confusion.npz,477236.0,184640.0,56250.0,56250.0,0.7145891411124385,0.812793427230047,0.5072284180090871,strided,depthwise_separable,3,5x5,2x2,24,,DS-5x5-24,2.7777777777777777
9 | 6,./data/results/20190501-0223/6.confusion.npz,380749.0,152810.0,46880.0,46880.0,0.7285278109650869,0.8194444444444443,0.5365551425030979,strided,depthwise_separable,3,5x5,2x2,20,,DS-5x5-20,2.7777777777777777
10 | 7,./data/results/20190501-0223/7.confusion.npz,291318.0,121590.0,37500.0,37500.0,0.7155183857692818,0.8092723004694835,0.5175547294506402,strided,depthwise_separable,3,5x5,2x2,16,,DS-5x5-16,2.7777777777777777
11 | 8,./data/results/20190501-0223/8.confusion.npz,208943.0,90970.0,28130.0,28130.0,0.6998539758396389,0.7924491392801252,0.5043370508054523,strided,depthwise_separable,3,5x5,2x2,12,,DS-5x5-12,2.7777777777777777
12 | 9,./data/results/20190501-0223/9.confusion.npz,1567280.0,98410.0,56350.0,56350.0,0.7265365724147085,0.8395931142410016,0.4878149524989674,sbcnn,depthwise_separable,3,5x5,3x2,24,,Baseline-DS,2.7777777777777777
13 |
--------------------------------------------------------------------------------
/report/img/sensornetworks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/sensornetworks.png
--------------------------------------------------------------------------------
/report/img/sensortile-annotated.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/sensortile-annotated.jpg
--------------------------------------------------------------------------------
/report/img/sensortile-devkit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/sensortile-devkit.jpg
--------------------------------------------------------------------------------
/report/img/spatially-separable-convolution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/spatially-separable-convolution.png
--------------------------------------------------------------------------------
/report/img/stm32cubeai.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/stm32cubeai.png
--------------------------------------------------------------------------------
/report/img/strided-convolution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/strided-convolution.png
--------------------------------------------------------------------------------
/report/img/training-inference.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/img/training-inference.png
--------------------------------------------------------------------------------
/report/microcontrollers.csv:
--------------------------------------------------------------------------------
1 | name,architecture,flash_kb,sram_kb,cpufreq_mhz,price_1k_usd
2 | STM32F030CC,Cortex-M0,256,32,48,1.0
3 | STM32L476,Cortex-M4,1024,128,80,5.0
4 | STM32F746,Cortex-M7,1024,1024,216,7.5
5 | STM32H743ZI,Cortex-M7,2048,1024,400,9.0
6 |
--------------------------------------------------------------------------------
/report/no-figure-floats.tex:
--------------------------------------------------------------------------------
1 | \usepackage{float}
2 | \let\origfigure\figure
3 | \let\endorigfigure\endfigure
4 | \renewenvironment{figure}[1][2] {
5 | \expandafter\origfigure\expandafter[H]
6 | } {
7 | \endorigfigure
8 | }
9 |
--------------------------------------------------------------------------------
/report/plots/urbansound8k-examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/plots/urbansound8k-examples.png
--------------------------------------------------------------------------------
/report/plots/urbansound8k-existing-models-logmel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/plots/urbansound8k-existing-models-logmel.png
--------------------------------------------------------------------------------
/report/plots/urbansound8k-existing-models-logmel.tex:
--------------------------------------------------------------------------------
1 | \begin{tabular}{lrrr}
2 | \toprule
3 | {} & Accuracy (\%) & MACC / second & Model parameters \\
4 | name & & & \\
5 | \midrule
6 | Dmix-CNN-mel & 82.6 & 298M & 1180k \\
7 | D-CNN & 81.9 & 458M & 33000k \\
8 | SB-CNN & 79.0 & 25M & 432k \\
9 | LD-CNN & 79.0 & 10M & 580k \\
10 | PiczakCNN & 75.0 & 88M & 25534k \\
11 | \bottomrule
12 | \end{tabular}
13 |
--------------------------------------------------------------------------------
/report/preamble.tex:
--------------------------------------------------------------------------------
1 | \usepackage{booktabs}
2 | \usepackage{minted}
3 | \usepackage{siunitx}
4 | \usepackage[toc,page]{appendix}
5 | \usepackage{pdfpages}
6 | \usepackage[T1]{fontenc}
7 | \usepackage{titlesec, blindtext, color}
8 |
9 |
10 | % https://tex.stackexchange.com/questions/4152/how-do-i-prevent-widow-orphan-lines %
11 | \usepackage[all]{nowidow}
12 |
13 | \definecolor{gray75}{gray}{0.75}
14 | \newcommand{\hsp}{\hspace{20pt}}
15 | \titleformat{\chapter}[hang]{\Huge\bfseries}{\thechapter\hsp\textcolor{gray75}{|}\hsp}{0pt}{\Huge\bfseries}
16 |
17 | \usepackage{tocloft}
18 | \renewcommand\cftlottitlefont{\Huge}
19 | \renewcommand\cftloftitlefont{\Huge}
20 |
--------------------------------------------------------------------------------
/report/pyincludes/experiment-settings.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import pandas
4 | import numpy
5 | import yaml
6 |
7 | settings = yaml.load(open("../experiments/ldcnn20k60.yaml").read())
8 |
9 | settings = pandas.DataFrame({
10 | 'setting': list(settings.keys()),
11 | 'value': list(settings.values()),
12 | })
13 | settings = settings.set_index('setting')
14 |
15 | print(settings)
16 |
17 | names = {
18 | 'samplerate': 'Samplerate (Hz)',
19 | 'n_mels': 'Melfilter bands',
20 | 'n_fft': 'FFT length (samples)',
21 | 'hop_length': 'FFT hop (samples)', # TODO: also time-resolution milliseconds
22 | 'frames': 'Classification window', # TODO: also in milliseconds?
23 | 'batch': 'Minibatch size',
24 | 'epochs': 'Epochs',
25 | 'train_samples': 'Training samples/epoch',
26 | 'val_samples': 'Validation samples/epoch',
27 | 'learning_rate': 'Learning rate',
28 | 'nesterov_momentum': 'Nesterov momentum',
29 | }
30 |
31 | table = settings.loc[list(names.keys())]
32 | table = table.rename(names)
33 |
34 | out = table.to_latex(header=True, index=True)
35 | print(out)
36 |
37 | outpath = sys.argv[1]
38 | with open(outpath, 'w') as f:
39 | f.write(out)
40 |
--------------------------------------------------------------------------------
/report/pyincludes/experiment-settings.tex:
--------------------------------------------------------------------------------
1 | \begin{tabular}{ll}
2 | \toprule
3 | {} & value \\
4 | setting & \\
5 | \midrule
6 | Samplerate (Hz) & 22050 \\
7 | Melfilter bands & 60 \\
8 | FFT length (samples) & 1024 \\
9 | FFT hop (samples) & 512 \\
10 | Classification window & 31 \\
11 | Minibatch size & 400 \\
12 | Epochs & 100 \\
13 | Training samples/epoch & 30000 \\
14 | Validation samples/epoch & 5000 \\
15 | Learning rate & 0.005 \\
16 | Nesterov momentum & NaN \\
17 | \bottomrule
18 | \end{tabular}
19 |
--------------------------------------------------------------------------------
/report/pyincludes/microcontrollers.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import pandas
4 | import os.path
5 |
6 | here = os.path.dirname(__file__)
7 | data = pandas.read_csv(os.path.join(here, '../microcontrollers.csv'))
8 |
9 | data = data.rename(columns={
10 | 'name': 'Name',
11 | 'architecture': 'Architecture',
12 | 'sram_kb': 'RAM (kB)',
13 | 'flash_kb': 'Flash (kB)',
14 | 'cpufreq_mhz': 'CPU (MHz)',
15 | 'price_1k_usd': 'Price (USD)',
16 | })
17 | table = data.to_latex(header=True, index=False)
18 | print(table)
19 |
20 | outpath = sys.argv[1]
21 | with open(outpath, 'w') as f:
22 | f.write(table)
23 |
--------------------------------------------------------------------------------
/report/pyincludes/microcontrollers.tex:
--------------------------------------------------------------------------------
1 | \begin{tabular}{llrrrr}
2 | \toprule
3 | Name & Architecture & Flash (kB) & RAM (kB) & CPU (MHz) & Price (USD) \\
4 | \midrule
5 | STM32F030CC & Cortex-M0 & 256 & 32 & 48 & 1.0 \\
6 | STM32L476 & Cortex-M4 & 1024 & 128 & 80 & 5.0 \\
7 | STM32F746 & Cortex-M7 & 1024 & 1024 & 216 & 7.5 \\
8 | STM32H743ZI & Cortex-M7 & 2048 & 1024 & 400 & 9.0 \\
9 | \bottomrule
10 | \end{tabular}
11 |
--------------------------------------------------------------------------------
/report/pyincludes/models.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import pandas
4 | import numpy
5 | import yaml
6 |
7 | df = pandas.read_csv('results/results.csv')
8 | print(df)
9 |
10 | # conv_block n_stages conv_size downsample_size filters
11 | # TODO: move to model stats
12 |
13 |
14 | # 'duration_avg': 'CPU (seconds)',
15 |
16 | def strformat(fmt, series):
17 | return [fmt.format(i) for i in series]
18 |
19 | df = df.sort_values('nickname', ascending=True)
20 |
21 | #width_multiple = df.nickname.str.startswith('Stride-DS-5x5-')
22 | #df = df.loc[width_multiple == False]
23 |
24 |
25 | conv_shorthand = {
26 | 'depthwise_separable': 'DS',
27 | 'bottleneck_ds': 'BTLN-DS',
28 | 'effnet': 'Effnet',
29 | 'conv': 'standard',
30 | }
31 |
32 | def downsample_from_name(name):
33 | if name.startswith('Stride'):
34 | return 'stride'
35 | else:
36 | return 'maxpool'
37 |
38 | table = pandas.DataFrame({
39 | 'Model': df.nickname,
40 | 'Downsample': [ "{} {}".format(downsample_from_name(n), s) for n, s in zip(df.nickname, df.downsample_size) ],
41 | 'Convolution': [ conv_shorthand[i] for i in df.conv_block ],
42 | 'L': df.n_stages,
43 | 'F': df.filters,
44 | 'MACC': strformat("{:d} K", (df.maccs_frame / 1000).astype(int)),
45 | 'RAM': strformat("{:d} kB", (df.ram_usage_max/1024).astype(int)),
46 | 'FLASH': strformat("{:d} kB", (df.flash_usage/1024).astype(int)),
47 | }, index=df.index)
48 |
49 | out = table.to_latex(header=True, index=False, column_format='lrrllrrr')
50 | print(out)
51 |
52 | outpath = sys.argv[1]
53 | with open(outpath, 'w') as f:
54 | f.write(out)
55 |
--------------------------------------------------------------------------------
/report/pyincludes/models.tex:
--------------------------------------------------------------------------------
1 | \begin{tabular}{lrrllrrr}
2 | \toprule
3 | Model & Downsample & Convolution & L & F & MACC & RAM & FLASH \\
4 | \midrule
5 | Baseline & maxpool 3x2 & standard & 3 & 24 & 10185 K & 35 kB & 405 kB \\
6 | Baseline-DS & maxpool 3x2 & DS & 3 & 24 & 1567 K & 55 kB & 96 kB \\
7 | Stride & stride 2x2 & standard & 3 & 22 & 2980 K & 55 kB & 372 kB \\
8 | Stride-BTLN-DS & stride 2x2 & BTLN-DS & 3 & 22 & 445 K & 47 kB & 80 kB \\
9 | Stride-DS-12 & stride 2x2 & DS & 3 & 12 & 208 K & 27 kB & 88 kB \\
10 | Stride-DS-16 & stride 2x2 & DS & 3 & 16 & 291 K & 36 kB & 118 kB \\
11 | Stride-DS-20 & stride 2x2 & DS & 3 & 20 & 380 K & 45 kB & 149 kB \\
12 | Stride-DS-24 & stride 2x2 & DS & 3 & 24 & 477 K & 54 kB & 180 kB \\
13 | Stride-DS-3x3 & stride 2x2 & DS & 4 & 24 & 318 K & 54 kB & 95 kB \\
14 | Stride-Effnet & stride 2x2 & Effnet & 3 & 22 & 468 K & 47 kB & 125 kB \\
15 | \bottomrule
16 | \end{tabular}
17 |
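
A quick cross-check against the resource budget stated in the abstract (50\% of the STM32L476 figures in report/microcontrollers.csv, i.e. 64 kB RAM and 512 kB flash). This is a sketch only; the per-model numbers are copied from the table above.

    # 50% budget of the STM32L476 (128 kB RAM, 1024 kB flash)
    ram_budget_kb, flash_budget_kb = 0.5 * 128, 0.5 * 1024

    models = {
        # name: (RAM kB, FLASH kB), taken from the table above
        'Baseline': (35, 405),
        'Stride-DS-24': (54, 180),
    }
    for name, (ram_kb, flash_kb) in models.items():
        within = ram_kb <= ram_budget_kb and flash_kb <= flash_budget_kb
        print(name, 'within budget' if within else 'over budget')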
--------------------------------------------------------------------------------
/report/pyincludes/results.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import pandas
4 | import numpy
5 | import yaml
6 |
7 | df = pandas.read_csv('results/results.csv')
8 | print(df)
9 |
10 | #width_variations = df.nickname.str.startswith('Stride-DS-5x5-')
11 | #df = df[width_variations != True]
12 | df = df.sort_values('nickname', ascending=True)
13 |
14 | def accuracies(df, col):
15 | mean = df[col+'_mean'] * 100
16 | std = df[col+'_std'] * 100
17 |
18 | fmt = [ "{:.1f}% +-{:.1f}".format(*t) for t in zip(mean, std) ]
19 | return fmt
20 |
21 | def cpu_use(df):
22 | usage = (df.utilization * 1000 * 1/df.classifications_per_second).astype(int)
23 | return ["{:d} ms".format(i).ljust(3) for i in usage]
24 |
25 | table = pandas.DataFrame({
26 | 'Model': df.nickname,
27 | 'CPU use': cpu_use(df),
28 | 'Accuracy': accuracies(df, 'test_acc'),
29 | 'FG Accuracy': accuracies(df, 'foreground_test_acc'),
30 | 'BG Accuracy': accuracies(df, 'background_test_acc'),
31 | }, index=df.index)
32 |
33 |
34 | out = table.to_latex(header=True, index=False, column_format='lrrrr')
35 | out = out.replace('+-', r'$\pm$') # XXX: Latex gets mangled by to_latex it seems
36 | print(out)
37 |
38 | outpath = sys.argv[1]
39 | with open(outpath, 'w') as f:
40 | f.write(out)
41 |
--------------------------------------------------------------------------------
/report/pyincludes/results.tex:
--------------------------------------------------------------------------------
1 | \begin{tabular}{lrrrr}
2 | \toprule
3 | Model & CPU use & Accuracy & FG Accuracy & BG Accuracy \\
4 | \midrule
5 | Baseline & 971 ms & 72.3\% $\pm$4.6 & 78.3\% $\pm$7.1 & 60.5\% $\pm$7.7 \\
6 | Baseline-DS & 244 ms & 70.2\% $\pm$4.7 & 76.1\% $\pm$7.5 & 58.6\% $\pm$8.2 \\
7 | Stride & 325 ms & 68.3\% $\pm$5.2 & 74.1\% $\pm$6.6 & 56.6\% $\pm$8.0 \\
8 | Stride-BTLN-DS & 71 ms & 64.8\% $\pm$7.1 & 69.5\% $\pm$8.2 & 55.3\% $\pm$8.9 \\
9 | Stride-DS-12 & 38 ms & 66.0\% $\pm$6.0 & 72.6\% $\pm$6.5 & 53.3\% $\pm$9.1 \\
10 | Stride-DS-16 & 51 ms & 67.5\% $\pm$5.6 & 73.3\% $\pm$7.7 & 56.2\% $\pm$8.3 \\
11 | Stride-DS-20 & 66 ms & 68.4\% $\pm$5.2 & 75.0\% $\pm$7.4 & 55.2\% $\pm$10.0 \\
12 | Stride-DS-24 & 81 ms & 70.9\% $\pm$4.3 & 75.8\% $\pm$6.3 & 61.8\% $\pm$6.8 \\
13 | Stride-DS-3x3 & 59 ms & 67.2\% $\pm$6.5 & 73.0\% $\pm$7.4 & 55.8\% $\pm$9.1 \\
14 | Stride-Effnet & 73 ms & 60.7\% $\pm$6.6 & 66.9\% $\pm$7.9 & 48.7\% $\pm$8.3 \\
15 | \bottomrule
16 | \end{tabular}
17 |
--------------------------------------------------------------------------------
/report/pyincludes/urbansound8k-classes.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | from microesc import urbansound8k
4 | import pandas
5 | import numpy
6 |
7 | data = urbansound8k.load_dataset()
8 | by_class = data.groupby('class')
9 | foreground_ratio = by_class.apply(lambda r: numpy.mean(r['salience'] == 1))
10 |
11 | table = pandas.DataFrame({
12 | 'Samples': by_class.count()['classID'],
13 | 'Duration (avg)': by_class.apply(lambda r: '%.2f s' % (r.end-r.start).mean()),
14 | 'In foreground': [ "{} %".format(int(100*r)) for r in foreground_ratio ]
15 | })
16 | out = table.to_latex(header=True, index=True, column_format="lrrr")
17 | print(out)
18 |
19 | outpath = sys.argv[1]
20 | with open(outpath, 'w') as f:
21 | f.write(out)
22 |
--------------------------------------------------------------------------------
/report/pyincludes/urbansound8k-classes.tex:
--------------------------------------------------------------------------------
1 | \begin{tabular}{lrrr}
2 | \toprule
3 | {} & Samples & Duration (avg) & In foreground \\
4 | class & & & \\
5 | \midrule
6 | air\_conditioner & 1000 & 3.99 s & 56 \% \\
7 | car\_horn & 429 & 2.46 s & 35 \% \\
8 | children\_playing & 1000 & 3.96 s & 58 \% \\
9 | dog\_bark & 1000 & 3.15 s & 64 \% \\
10 | drilling & 1000 & 3.55 s & 90 \% \\
11 | engine\_idling & 1000 & 3.94 s & 91 \% \\
12 | gun\_shot & 374 & 1.65 s & 81 \% \\
13 | jackhammer & 1000 & 3.61 s & 73 \% \\
14 | siren & 929 & 3.91 s & 28 \% \\
15 | street\_music & 1000 & 4.00 s & 62 \% \\
16 | \bottomrule
17 | \end{tabular}
18 |
--------------------------------------------------------------------------------
/report/pyplots/dataaugmentations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/pyplots/dataaugmentations.png
--------------------------------------------------------------------------------
/report/pyplots/dataaugmentations.py:
--------------------------------------------------------------------------------
1 |
2 | from matplotlib import pyplot as plt
3 | import numpy
4 | import librosa
5 | import librosa.display
6 | import scipy.ndimage
7 |
8 | def plot_augmentations(y, sr, time_shift=3000, pitch_shift = 12, time_stretch = 1.3):
9 | augmentations = {
10 | 'Original': y,
11 |
12 | "Timeshift left": y[time_shift:],
13 | "Timeshift right": numpy.concatenate([numpy.zeros(time_shift), y[:-time_shift]]),
14 |
15 | "Timestretch faster": librosa.effects.time_stretch(y, time_stretch),
16 | "Timestretch slower": librosa.effects.time_stretch(y, 1/time_stretch),
17 |
18 | "Pitchshift up": librosa.effects.pitch_shift(y, sr, pitch_shift),
19 | "Pitchshift down": librosa.effects.pitch_shift(y, sr, -pitch_shift),
20 | }
21 |
22 | layout = [
23 | ["Original", "Original", "Original"],
24 | ["Timeshift right", "Timestretch faster", "Pitchshift up"],
25 | ["Timeshift left", "Timestretch slower", "Pitchshift down"]
26 | ]
27 |
28 | shape = numpy.array(layout).shape
29 | fig, axs = plt.subplots(shape[0], shape[1], figsize=(16,6), sharex=True)
30 |
31 | for row in range(shape[0]):
32 | for col in range(shape[1]):
33 | description = layout[row][col]
34 | ax = axs[row][col]
35 | data = augmentations[description]
36 |
37 | S = numpy.abs(librosa.stft(data))
38 | S = scipy.ndimage.filters.gaussian_filter(S, 0.7)
39 | S = librosa.amplitude_to_db(S, ref=numpy.max)
40 | S -= S.mean()
41 | #S = scipy.ndimage.filters.median_filter(S, (3,3))
42 | librosa.display.specshow(S, ax=ax, sr=sr, y_axis='hz')
43 | ax.set_ylim(0, 5000)
44 | ax.set_title(description)
45 | return fig
46 |
47 | def main():
48 | path = '163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav'
49 | y, sr = librosa.load(path, offset=0.1, duration=1.2)
50 | fig = plot_augmentations(y, sr)
51 |
52 |
53 | out = (__file__).replace('.py', '.png')
54 | fig.savefig(out, bbox_inches='tight')
55 |
56 | if __name__ == '__main__':
57 | main()
58 |
59 |
--------------------------------------------------------------------------------
/report/pyplots/filterbanks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/pyplots/filterbanks.png
--------------------------------------------------------------------------------
/report/pyplots/logloss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/pyplots/logloss.png
--------------------------------------------------------------------------------
/report/pyplots/logloss.py:
--------------------------------------------------------------------------------
1 | import numpy
2 | from matplotlib import pyplot as plt
3 | from sklearn.metrics import log_loss
4 |
5 | def plot_logloss(figsize=(6, 3)):
6 | fig, ax = plt.subplots(1, figsize=figsize)
7 |
8 | yhat = numpy.linspace(0.0, 1.0, 300)
9 | losses_0 = [log_loss([0], [x], labels=[0,1]) for x in yhat]
10 | losses_1 = [log_loss([1], [x], labels=[0,1]) for x in yhat]
11 |
12 | ax.plot(yhat, losses_0, label='true=0')
13 | ax.plot(yhat, losses_1, label='true=1')
14 | ax.legend()
15 |
16 | ax.set_ylim(0, 8)
17 | ax.set_xlim(0, 1)
18 |
19 | return fig
20 |
21 | def main():
22 | fig = plot_logloss()
23 | fig.tight_layout()
24 | out = (__file__).replace('.py', '.png')
25 | fig.savefig(out, bbox_inches='tight')
26 |
27 | if __name__ == '__main__':
28 | main()
29 |
30 |
31 |
--------------------------------------------------------------------------------
/report/results/confusion_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/results/confusion_test.png
--------------------------------------------------------------------------------
/report/results/grouped_confusion_test_foreground.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/results/grouped_confusion_test_foreground.png
--------------------------------------------------------------------------------
/report/results/models_accuracy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/results/models_accuracy.png
--------------------------------------------------------------------------------
/report/results/models_efficiency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jonnor/ESC-CNN-microcontroller/572c319c7ad4d0a98bf210d59b26f6df923c8e7b/report/results/models_efficiency.png
--------------------------------------------------------------------------------
/report/sections.py:
--------------------------------------------------------------------------------
1 | import pandas
2 |
3 | sections = {
4 | 'cover': 1,
5 | 'summary': 1,
6 | 'toc': 2,
7 | 'introduction': 5,
8 | 'background': 15,
9 | 'materials': 6,
10 | 'methods': 5,
11 | 'results': 5,
12 | 'discussion': 2,
13 | 'conclusion': 1,
14 | 'references': 2,
15 | 'attachments': 5,
16 | }
17 | df = pandas.DataFrame({ 'pages': list(sections.values())}, index=list(sections.keys()))
18 | print(df)
19 |
20 | print(df.pages.sum())
21 |
--------------------------------------------------------------------------------
/report/short-captions.lua:
--------------------------------------------------------------------------------
1 | if FORMAT ~= "latex" then
2 | return
3 | end
4 |
5 | local function latex(str)
6 | return pandoc.RawInline('latex', str)
7 | end
8 |
9 | function figure_image (elem)
10 | local image = elem.content and elem.content[1]
11 | return (image.t == 'Image' and image.title == 'fig:')
12 | and image
13 | or nil
14 | end
15 |
16 | function Para (para)
17 | local img = figure_image(para)
18 | if not img or not img.caption or not img.attributes['short-caption'] then
19 | return nil
20 | end
21 |
22 | local short_caption = pandoc.Span(
23 | pandoc.read(img.attributes['short-caption']).blocks[1].c
24 | )
25 | local hypertarget = "{%%\n"
26 | local label = "\n"
27 | if img.identifier ~= img.title then
28 | hypertarget = string.format("\\hypertarget{%s}{%%\n",img.identifier)
29 | label = string.format("\n\\label{%s}",img.identifier)
30 | end
31 | return pandoc.Para {
32 | latex(hypertarget .. "\\begin{figure}\n\\centering\n"),
33 | img,
34 | latex("\n\\caption["), short_caption, latex("]"), pandoc.Span(img.caption),
35 | latex(label .."\n\\end{figure}\n}\n")
36 | }
37 | end
38 |
--------------------------------------------------------------------------------
/report/status.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Audio Classification using general-purpose microcontrollers
3 | author: Jon Nordby
4 | date: February 07, 2019
5 | ---
6 |
7 | ## Overall
8 |
9 | **GANTT chart**
10 | https://jonnor.github.io/thesis-audio-classification-microcontrollers/plan.html
11 |
12 | Summary
13 |
14 | - On schedule
15 | - Experimentation going a bit slow
16 |
17 |
18 | ## Tasks for March 7
19 |
20 | - Write methods
21 | - Finish writing introduction
22 | - Fix/add data augmentation
23 | - Set up validation pipeline
24 |
25 | # Writing
26 |
27 | ## Introduction
28 |
29 | - Outline clarified
30 | - 25% done?
31 |
32 | ## Background
33 |
34 | - Outline. 20%
35 | - Some non-mathy stuff there
36 |
37 | ## Materials
38 |
39 | In pretty good shape.
40 |
41 | - Dataset DONE
42 | - Hardware platform DONE
43 | - Software platform 50%
44 | - Existing methods 50%
45 |
46 |
47 | # Experiments
48 |
49 | ## Unhardcoded mel-spec processing
50 |
51 | * Tools for generating Hann window + Mel filters lookup-tables
52 | * Not yet run on device
53 |
54 | ## Validation pipeline
55 |
56 | - OK
57 | - Can run easily on Google Cloud Engine. But still slow!! CPU utilization low
58 | - Voted accuracy now shown during training, making evaluation easier
59 | - SB-CNN up to 72% accuracy
60 |
61 | Learned
62 |
63 | - Sensitive to hyperparameters!
64 |
65 | ## LD-CNN
66 |
67 | - Good: Was able to get it to run, with single input.
68 |
69 | LD-CNN. Best model: 79% voted, 90% overlap
70 | Using augmentation.
71 |
72 | SB-CNN is 78% with data augmentation
73 |
74 |
75 | # Next
76 |
77 | ## Tasks for March 21
78 |
79 | - Write methods
80 | - Finish writing introduction
81 | - Write in existing results
82 | - Do Hyperparameter search
83 | - Do Model search
84 |
85 |
86 | ## Hypotheses
87 |
88 | **On github**
89 |
90 | https://github.com/jonnor/thesis-audio-classification-microcontrollers/blob/master/braindump.md#hypotheses
91 |
92 |
--------------------------------------------------------------------------------
/report/summary.md:
--------------------------------------------------------------------------------
1 |
2 | # Summary
3 |
4 | TODO: write last
5 |
6 |
--------------------------------------------------------------------------------
/report/urbansound8k-existing.csv:
--------------------------------------------------------------------------------
1 | name,accuracy,noaug_acc,mflops,kparams,reference,augmentation,features,mmacc,samplerate,window,hop,frames,bands
2 | PiczakCNN,0.75,0.73,168,25534,Piczak,"pitchshift,timestretch,noisemix","logmel+delta",84,22050,1024,512,41,60
3 | SB-CNN,0.79,0.73,152,432,SalomonBello,"pitchshift,timestretch,noisemix","logmel",76,44100,1024,1024,128,128
4 | Dmix-CNN-mel,0.826,0.747,837,1180,ZhichaoZhang,"pitchshift,timestretch,Mixup","logmel+delta",443,44100,1024,512,128,128
5 | D-CNN,0.819,,85,33000,Zhang,"timestretch","logmel+delta",330,22050,1024,512,31,60
6 | LD-CNN,0.79,,19.8,580,Zhang2018,"timestretch","logmel+delta",7.5,22050,1024,512,31,60
7 | DilaConv,0.78,,,,YanChen2019,"?","log-MISSING-mel"
8 | Dmix-CNN-gt,0.837,0.777,837,1180,ZhichaoZhang,"pitchshift,timestretch,mixup","gammatone+delta",443,44100,1024,512,128,128
9 | EnvNetv2,0.783,0.691,,,Tokozume,"betweenclass","waveform"
10 | WSNet,0.70,,,288,Xiaojie,"?","waveform"
11 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.14.0
2 | pytest-cov>=2.5.1
3 | scipy>=1.0.0
4 | scikit-learn>=0.19.1
5 | librosa>=0.6.1
6 | keras==2.2.5
7 | tensorflow>=1.12
8 | pandas>=0.23.0
9 | matplotlib>=3.0.0
10 | seaborn>=0.9.0
11 | git+http://github.com/SiggiGue/pyfilterbank
12 | h5py==2.10.0
13 |
--------------------------------------------------------------------------------
/run.py:
--------------------------------------------------------------------------------
1 |
2 | import sys
3 | import os.path
4 | import subprocess
5 | import datetime
6 | import uuid
7 |
8 | import pandas
9 | import numpy
10 |
11 | from microesc import common
12 |
13 | def arglist(options):
14 | args = [ "--{}={}".format(k, v) for k, v in options.items() ]
15 | return args
16 |
17 | def command_for_job(options):
18 | args = [
19 | 'python3', 'train.py'
20 | ]
21 | args += arglist(options)
22 | return args
23 |
24 | def generate_train_jobs(experiments, settings_path, folds, overrides):
25 |
26 | timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
27 | unique = str(uuid.uuid4())[0:4]
28 | def name(experiment, fold):
29 | name = "-".join([experiment, timestamp, unique])
30 | return name+'-fold{}'.format(fold)
31 |
32 | def job(exname, experiment):
33 |
34 | for fold in folds:
35 | n = name(exname, fold)
36 |
37 | options = {
38 | 'name': n,
39 | 'fold': fold,
40 | 'settings': settings_path,
41 | }
42 | for k, v in experiment.items():
43 | # overrides per experiment
44 | options[k] = v
45 |
46 | for k, v in overrides.items():
47 | options[k] = v
48 |
49 | cmd = command_for_job(options)
50 | return cmd
51 |
52 | # FIXME: better job name
53 | jobs = [ job(str(idx), ex) for idx, ex in experiments.iterrows() ]
54 | return jobs
55 |
56 | def parse(args):
57 |
58 | import argparse
59 |
60 | parser = argparse.ArgumentParser(description='Generate jobs')
61 |
62 | #common.add_arguments(parser)
63 |
64 | a = parser.add_argument
65 |
66 | a('--models', default='models.csv',
67 | help='%(default)s')
68 | a('--settings', default='experiments/ldcnn20k60.yaml',
69 | help='%(default)s')
70 |
71 |
72 | a('--jobs', dest='jobs_dir', default='./data/jobs',
73 | help='%(default)s')
74 |
75 | a('--check', action='store_true',
76 | help='Only run a pre-flight check')
77 |
78 | parsed = parser.parse_args(args)
79 |
80 | return parsed
81 |
82 | def main():
83 | args = parse(sys.argv[1:])
84 |
85 | models = pandas.read_csv(args.models)
86 | settings = common.load_settings_path(args.settings)
87 |
88 | overrides = {}
89 | folds = list(range(0, 9))
90 | if args.check:
91 | folds = (1,)
92 | overrides['train_samples'] = settings['batch']*1
93 | overrides['val_samples'] = settings['batch']*1
94 |
95 | cmds = generate_train_jobs(models, args.settings, folds, overrides)
96 |
97 | print('\n'.join(" ".join(cmd) for cmd in cmds))
98 |
99 | if __name__ == '__main__':
100 | main()
101 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 |
2 | from microesc import test
3 | test.main()
4 |
--------------------------------------------------------------------------------
/test/test_models.py:
--------------------------------------------------------------------------------
1 |
2 | import pytest
3 | import keras
4 |
5 | from microesc import models
6 | from microesc import settings, stats
7 |
8 | FAMILIES=list(models.families.keys())
9 |
10 | @pytest.mark.skip("fails right now")
11 | @pytest.mark.parametrize('family', FAMILIES)
12 | def test_models_basic(family):
13 | s = settings.load_settings({
14 | 'model': family,
15 | 'frames': 31,
16 | 'n_mels': 60,
17 | 'samplerate': 22050,
18 | })
19 | if family == 'sbcnn':
20 | s['downsample_size'] = (3, 2)
21 | s['conv_size'] = (3, 3)
22 | if family == 'strided':
23 | s['downsample_size'] = (3, 3)
24 | s['conv_size'] = (3, 3)
25 | s['conv_block'] = 'conv'
26 | s['filters'] = 12
27 |
28 | m = models.build(s)
29 |
30 | assert isinstance(m, keras.Model)
31 |
32 |
33 | CONV_TYPES=[
34 | 'conv',
35 | 'depthwise_separable',
36 | 'bottleneck_ds',
37 | 'effnet',
38 | ]
39 | @pytest.mark.parametrize('conv_type', CONV_TYPES)
40 | def test_strided_variations(conv_type):
41 |
42 | s = settings.load_settings({
43 | 'model': 'strided',
44 | 'frames': 31,
45 | 'n_mels': 60,
46 | 'samplerate': 22050,
47 | 'conv_block': conv_type,
48 | 'filters': 20,
49 | })
50 | s['conv_size'] = (3, 3)
51 | s['downsample_size'] = (2, 2)
52 |
53 | m = models.build(s)
54 | assert isinstance(m, keras.Model)
55 |
56 |
57 | def test_conv_ds():
58 | k = (5, 5)
59 | i = (60, 31, 16)
60 | ch = 16
61 |
62 | conv = stats.compute_conv2d(*i, ch, *k)
63 | ds = stats.compute_conv2d_ds(*i, ch, *k)
64 |
65 | ratio = conv / ds
66 | assert ratio > 9.0
67 |
68 | def test_conv_ds3x3():
69 | k = (3, 3)
70 | i = (60, 31, 64)
71 | ch = 64
72 |
73 | conv = stats.compute_conv2d(*i, ch, *k)
74 | ds = stats.compute_conv2d_ds(*i, ch, *k)
75 |
76 | ratio = conv / ds
77 | assert ratio > 7.5
78 |
79 |
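Note: the ratio thresholds in test_conv_ds and test_conv_ds3x3 line up with the usual cost model for depthwise-separable convolutions. A standalone sanity check of that arithmetic (independent of microesc.stats, whose exact counting may differ) could look like:

    # Rough cost model (assumption: MAC counts, same input/output channels C):
    #   standard KxK conv:        H * W * C * C * K * K
    #   depthwise-separable conv: H * W * C * (K * K + C)
    # so the ratio reduces to K*K*C / (K*K + C), independent of H and W.
    def ds_speedup(k, channels):
        return (k * k * channels) / (k * k + channels)

    assert ds_speedup(5, 16) > 9.0   # 5x5 kernel, 16 channels: ~9.76x
    assert ds_speedup(3, 64) > 7.5   # 3x3 kernel, 64 channels: ~7.89x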
--------------------------------------------------------------------------------
/test/test_training.py:
--------------------------------------------------------------------------------
1 |
2 | import itertools
3 |
4 | import pandas
5 | import numpy
6 | import pytest
7 |
8 | from microesc import train, features, urbansound8k
9 |
10 | @pytest.mark.skip("fails")
11 | def test_generator_fake_loader():
12 |
13 | dataset_path = 'data/UrbanSound8K/'
14 | urbansound8k.default_path = dataset_path
15 | data = urbansound8k.load_dataset()
16 | folds, test = urbansound8k.folds(data)
17 |
18 | data_length = 16
19 | batch_size = 8
20 | frames = 72
21 | bands = 32
22 | n_classes = 10
23 |
24 | def zero_loader(s):
25 | # loader stub: ignore the sample and return a fixed-shape zero feature
26 | return numpy.zeros((bands, frames, 1))
27 |
28 | fold = folds[0][0]
29 | X = fold[0:data_length]
30 | Y = fold.classID[0:data_length]
31 |
32 | g = train.dataframe_generator(X, Y, loader=zero_loader,
33 | batchsize=batch_size, n_classes=n_classes)
34 |
35 | n_batches = 3
36 | batches = list(itertools.islice(g, n_batches))
37 | assert len(batches) == n_batches
38 | assert len(batches[0]) == 2 # X,y
39 | assert batches[0][0].shape == (batch_size, bands, frames, 1)
40 | assert batches[0][1].shape == (batch_size, n_classes)
41 |
42 |
43 | def test_windows_shorter_than_window():
44 | frame_samples=256
45 | window_frames=64
46 | fs=16000
47 | length = 0.4*fs
48 | w = list(features.sample_windows(int(length), frame_samples, window_frames))
49 | assert len(w) == 1, len(w)
50 | assert w[-1][1] == length
51 |
52 | def test_window_typical():
53 | frame_samples=256
54 | window_frames=64
55 | fs=16000
56 | length = 4.0*fs
57 | w = list(features.sample_windows(int(length), frame_samples, window_frames))
58 | assert len(w) == 8, len(w)
59 | assert w[-1][1] == length
60 |
61 |
62 | def _test_predict_windowed():  # disabled: depends on a trained model and test split not defined in this module
63 |
64 | t = test[0:10]
65 |
66 | sbcnn16k32_settings = dict(
67 | feature='mels',
68 | samplerate=16000,
69 | n_mels=32,
70 | fmin=0,
71 | fmax=8000,
72 | n_fft=512,
73 | hop_length=256,
74 | augmentations=5,
75 | )
76 |
77 | def load_sample32(sample):
78 | return features.load_sample(sample, sbcnn16k32_settings, window_frames=72, feature_dir='../../scratch/aug')
79 |
80 | mean_m = features.predict_voted(sbcnn16k32_settings, model, t, loader=load_sample32, method='mean')
81 |
82 |
83 |
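Note: the expected window counts above are consistent with an analysis window of frame_samples * window_frames samples advanced with 50% overlap. That overlap is an assumption about the sample_windows default, but the arithmetic can be checked in isolation:

    # Back-of-the-envelope check of the window counts asserted above.
    # Assumption: sample_windows advances by half a window (50% overlap) and
    # always yields at least one window, clamping the last one to the clip length.
    import math

    def expected_window_count(length_samples, frame_samples, window_frames, overlap=0.5):
        window = frame_samples * window_frames      # 256 * 64 = 16384 samples (~1 s @ 16 kHz)
        hop = int(window * (1.0 - overlap))         # 8192 samples per step
        return max(1, math.ceil(length_samples / hop))

    assert expected_window_count(int(0.4 * 16000), 256, 64) == 1   # shorter than one window
    assert expected_window_count(int(4.0 * 16000), 256, 64) == 8   # typical 4 second clip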
--------------------------------------------------------------------------------
/test/test_urbansound.py:
--------------------------------------------------------------------------------
1 |
2 | import os.path
3 | import shutil
4 |
5 | import numpy
6 | import pytest
7 |
8 | from microesc import preprocess, urbansound8k, features, report
9 |
10 | def test_precompute():
11 |
12 | settings = dict(
13 | feature='mels',
14 | samplerate=16000,
15 | n_mels=32,
16 | fmin=0,
17 | fmax=8000,
18 | n_fft=512,
19 | hop_length=256,
20 | augmentations=12,
21 | )
22 |
23 | dir = './pre2'
24 | if os.path.exists(dir):
25 | shutil.rmtree(dir)
26 |
27 | workdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/'))
28 |
29 | data = urbansound8k.load_dataset()
30 | urbansound8k.maybe_download_dataset(workdir)
31 |
32 | d = os.path.join(dir, features.settings_id(settings))
33 | expect_path = features.feature_path(data.iloc[0], d)
34 | assert not os.path.exists(expect_path), expect_path
35 |
36 | preprocess.precompute(data[0:4], settings, out_dir=d, verbose=0, force=True, n_jobs=2)
37 |
38 | assert os.path.exists(expect_path), expect_path
39 |
40 |
41 | def test_grouped_confusion():
42 | cm = numpy.array([
43 | [82, 0, 3, 0, 0, 10, 0, 4, 1, 0],
44 | [ 3, 29, 0, 0, 0, 0, 1, 0, 0, 0],
45 | [ 4, 3, 37, 14, 4, 4, 0, 0, 2, 32],
46 | [ 5, 2, 5, 78, 4, 0, 0, 0, 0, 6],
47 | [23, 2, 4, 1, 55, 4, 2, 6, 3, 0],
48 | [ 9, 0, 0, 4, 3, 70, 0, 5, 1, 1],
49 | [ 0, 0, 0, 5, 0, 0, 27, 0, 0, 0],
50 | [ 0, 0, 2, 0, 1, 1, 1, 91, 0, 0],
51 | [ 9, 11, 9, 4, 0, 1, 0, 0, 46, 3],
52 | [ 1, 7, 7, 0, 7, 0, 0, 0, 3, 75]
53 | ])
54 | gcm, gnames = report.grouped_confusion(cm, report.groups)
55 |
56 | assert(numpy.sum(cm) == numpy.sum(gcm))
57 | assert(gnames[0] == 'social_activity')
58 | assert(gnames[3] == 'domestic_machines')
59 |
60 | expect_correct_social = (37+78+75)+(14+32)+(5+6)+(7+0)
61 | # correct
62 | # + children playing misclassified as other social classes
63 | # + dog barks misclassified as other social classes
64 | # + street music misclassified as other social classes
65 | assert(gcm[0][0] == expect_correct_social), (gcm[0][0], expect_correct_social)
66 |
67 | # this group contains only a single source class, so its diagonal is one cm entry
68 | assert(gcm[3][3] == 82)
69 |
70 |
71 | folds = urbansound8k  # module alias; ensure_valid_fold and folds_idx live in urbansound8k
72 | CORRECT_FOLDS={
73 | '8val-9test': ((0,1,2,3,4,5,6,7), (8,), (9,)),
74 | }
75 | WRONG_FOLDS={
76 | 'train too short': ((0,1,2), (3,), (4,)),
77 | 'val/test overlap with train': ((0,1,2,3,4,5,6,7), (4,), (5,)),
78 | 'out-of-bounds val': ((0,2,3,4,5,6,7,8), (10,), (5,)),
79 | }
80 |
81 | @pytest.mark.parametrize('example', CORRECT_FOLDS.keys())
82 | def test_ensure_valid_fold_passes_correct(example):
83 | fold = CORRECT_FOLDS[example]
84 | folds.ensure_valid_fold(fold)
85 |
86 | @pytest.mark.parametrize('example', WRONG_FOLDS.keys())
87 | def test_ensure_valid_fold_detects_wrong(example):
88 | fold = WRONG_FOLDS[example]
89 | with pytest.raises(AssertionError) as e_info:
90 | folds.ensure_valid_fold(fold)
91 |
92 | def test_folds_idx():
93 | f = folds.folds_idx(10)
94 | print('\n'+'\n'.join([ str(i) for i in f ]))
95 | assert f[0][2][0] == 0, "first test fold should be 0"
96 | assert f[-1][2][0] == 9, "last test fold should be 9"
97 |
98 |
99 | def test_folds():
100 | data = urbansound8k.load_dataset()
101 | f = urbansound8k.folds(data)
102 | assert len(f) == 10
103 |
104 |
105 |
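Note: the fold layout the tests above expect (train/val/test rotation over the ten UrbanSound8K folds) can be reproduced with a small standalone sketch. The rotation rule here is an assumption inferred from the CORRECT_FOLDS example and the test_folds_idx invariants, not a copy of folds_idx itself:

    # Hypothetical re-implementation of the expected fold rotation: fold i is the
    # test fold, the previous fold is validation, the remaining eight are training.
    def rotated_folds(n=10):
        out = []
        for test_fold in range(n):
            val_fold = (test_fold - 1) % n
            train = tuple(f for f in range(n) if f not in (val_fold, test_fold))
            out.append((train, (val_fold,), (test_fold,)))
        return out

    f = rotated_folds(10)
    assert f[0][2][0] == 0 and f[-1][2][0] == 9     # same invariants as test_folds_idx
    assert f[9] == (tuple(range(8)), (8,), (9,))    # matches the ((0..7), (8,), (9,)) example above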
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 |
2 | from microesc import train
3 | train.main()
4 |
--------------------------------------------------------------------------------