├── .gitmodules
├── .gitignore
├── elm-package.json
├── CONTRIBUTING.md
├── examples
│   ├── Stream.elm
│   ├── Simple.elm
│   ├── visual.html
│   ├── Makefile
│   ├── Scales.elm
│   └── Visual.elm
├── README.md
├── LICENSE
├── Native
│   └── WebAudio.js
└── WebAudio.elm
/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | examples/release/
2 | examples/.soundcloudkey
3 | build/
4 | cache/
5 | *.elmi
6 | *.elmo
7 | *.swp
8 | elm-stuff
9 | elm.js
10 |
11 |
--------------------------------------------------------------------------------
/elm-package.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "summary": "Elm library for accessing the Web Audio API",
4 | "repository": "https://github.com/trotha01/elm-webaudio.git",
5 | "license": "BSD3",
6 | "source-directories": [
7 | "."
8 | ],
9 | "exposed-modules": [
10 | "WebAudio"
11 | ],
12 | "native-modules": true,
13 | "dependencies": {
14 | "elm-lang/core": "4.0.0 <= v < 5.0.0",
15 | "elm-lang/html": "1.0.0 <= v < 2.0.0"
16 | },
17 | "elm-version": "0.17.0 <= v < 0.18.0"
18 | }
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Running the Examples
2 |
3 | To run the examples locally (except for Visual.elm):
4 | ```
5 | cd elm-webaudio
6 | elm-reactor
7 | ```
8 |
9 | To compile the examples locally:
10 | ```
11 | cd elm-webaudio
12 | elm-make examples/Simple.elm --output simple.html
13 | elm-make examples/Scales.elm --output scales.html
14 | elm-make examples/Stream.elm --output stream.html
15 | ```
16 |
17 |
18 | ## Visual.elm
19 |
20 | NOTE: you will need to add a SoundCloud client ID to examples/Visual.html for that example to work
21 |
22 | To compile the Visual.elm example:
23 | ```
24 | cd elm-webaudio
25 | elm-make examples/Visual.elm --output examples/Visual.js
26 | ```
27 |
28 | To view the Visual.elm example locally, you can serve the repository with a simple Python server:
29 | ```
30 | python -m SimpleHTTPServer 8080
31 | ```
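(If you are on Python 3, the equivalent is `python3 -m http.server 8080`.)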
32 | Now navigate to http://localhost:8080/examples/Visual.html
33 |
34 |
--------------------------------------------------------------------------------
/examples/Stream.elm:
--------------------------------------------------------------------------------
1 | module Main (..) where
2 |
3 | import UserMedia exposing (MediaStream, requestUserMedia)
4 | import WebAudio exposing (..)
5 | import Task as T
6 | import Signal as S
7 | import Html exposing (div, text)
8 |
9 |
10 | view model =
11 | case model of
12 | Nothing ->
13 | div [] [ text "Nothing" ]
14 |
15 | Just stream ->
16 | let
17 | node =
18 | createMediaStreamAudioSourceNode DefaultContext stream
19 | |> connectNodes (getDestinationNode DefaultContext) 0 0
20 | in
21 | div [] [ text ("Got user media : " ++ (.label stream)) ]
22 |
23 |
24 | {-| send one time request for usermedia, stream is then forwarded to the
25 | mailbox
26 | -}
27 | port getUserMedia : T.Task x ()
28 | port getUserMedia =
29 | requestUserMedia userMediaStream.address { audio = True, video = False }
30 |
31 |
32 | userMediaStream : S.Mailbox (Maybe MediaStream)
33 | userMediaStream =
34 | S.mailbox Nothing
35 |
36 |
37 | main =
38 | Signal.map view userMediaStream.signal
39 |
--------------------------------------------------------------------------------
/examples/Simple.elm:
--------------------------------------------------------------------------------
1 | module Main exposing (..)
2 |
3 | import WebAudio exposing (OscillatorNode, AudioContext(DefaultContext), stopOscillator, setValue, startOscillator, getDestinationNode, connectNodes, OscillatorWaveType(..), createOscillatorNode)
4 | import Html exposing (..)
5 | import Html.Events exposing (onClick)
6 | import Html.App exposing (beginnerProgram)
7 |
8 |
9 | -- Model
10 |
11 |
12 | model =
13 | 0
14 |
15 |
16 |
17 | -- Update
18 |
19 |
20 | type Action
21 | = Play
22 |
23 |
24 | startMusic action model =
25 | let
26 | _ =
27 | case action of
28 | Play ->
29 | createOscillatorNode DefaultContext Sine
30 | |> connectNodes (getDestinationNode DefaultContext) 0 0
31 | |> startOscillator 0.0
32 | |> stopOscillator 1.0
33 | in
34 | model
35 |
36 |
37 |
38 | -- View
39 |
40 |
41 | view address =
42 | button [ onClick Play ] [ text "Play" ]
43 |
44 |
45 |
46 | -- Main
47 |
48 |
49 | main =
50 | beginnerProgram
51 | { model = model
52 | , view = view
53 | , update = startMusic
54 | }
55 |
56 |
--------------------------------------------------------------------------------
/examples/visual.html:
--------------------------------------------------------------------------------
   [visual.html's markup was stripped during extraction; only the page title
   "elm-webaudio soundcloud visualizer" survives]
--------------------------------------------------------------------------------
/examples/Makefile:
--------------------------------------------------------------------------------
1 | EXAMPLES := $(wildcard *.elm)
2 | TARGETS := $(addprefix build/,$(EXAMPLES:.elm=.html)) build/Visual.js
3 |
4 | INCLUDE_FILES := $(wildcard ../*.elm) $(wildcard ../lib/elm-usermedia/src/*.elm)
5 | SCRIPTS := $(wildcard ../Native/*.js) $(wildcard ../lib/elm-usermedia/src/Native/*.js)
6 |
7 | .PHONY: all
8 | all: $(TARGETS)
9 |
10 | $(TARGETS): $(INCLUDE_FILES) $(SCRIPTS)
11 |
12 | build/%.js: %.elm
13 | elm-make --yes $(CURDIR)/$< --output $(CURDIR)/$@
14 |
15 | build/%.html: %.elm
16 | elm-make --yes $(CURDIR)/$< --output $(CURDIR)/$@
17 | @echo "** Open $@ in your browser"
18 |
19 | # The following rules are for the "Visual" example:
20 | build/Visual.html: Visual.html build/Visual.js
21 | ifneq ("$(wildcard .soundcloudkey)","")
22 | sed 's/YOUR CLIENT ID HERE/$(shell cat .soundcloudkey)/' Visual.html > build/Visual.html
23 | @echo "** Open $@ in your browser"
24 | else
25 | cp Visual.html build/Visual.html
26 | @echo "** Edit $@ to add your SoundCloud API key."
27 | @echo "** You can also put your key in a file called .soundcloudkey"
28 | @echo "** in the same directory as this Makefile and it will"
29 | @echo "** automatically get added when you 'make'"
30 | endif
31 |
32 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | elm-webaudio
2 | ============
3 |
 4 | Forked from [bmatcuk](https://github.com/bmatcuk/elm-webaudio); updating it for
 5 | Elm 0.17 is a work in progress.
6 |
7 | To contribute, see the [contributing doc](CONTRIBUTING.md)
8 |
9 | The `elm-webaudio` library connects your [Elm](http://elm-lang.org/) programs
10 | to the [Web Audio API](http://webaudio.github.io/web-audio-api/). This library
11 | is somewhat experimental and incomplete, but it is usable. Check out an example
12 | of the library in use: [oscillator scales](http://www.squeg.net/elm-webaudio/Scales.html).
13 |
14 | And another, much more complicated example:
15 | [soundcloud](http://www.squeg.net/elm-webaudio/Visual.html).
16 |
17 | I highly recommend reading the Web Audio API documentation to make sure you
18 | understand the concepts before trying to use this library. This library is a
19 | fairly thin wrapper around the Web Audio API, so everything in the Web Audio
20 | API documentation applies.
21 |
22 | # Examples
23 | [simple](http://trotha01.github.io/elm-webaudio/examples/simple.html)
24 |
25 | [scales](http://trotha01.github.io/elm-webaudio/examples/scales.html)
26 |
27 | [stream](http://trotha01.github.io/elm-webaudio/examples/stream.html)
28 |
29 | TODO: not currently working; a SoundCloud client ID still needs to be added
30 | [visual](http://trotha01.github.io/elm-webaudio/examples/visual.html)
31 |
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014, Bob Matcuk
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | * Neither the name of the author nor the names of its
15 | contributors may be used to endorse or promote products derived from
16 | this software without specific prior written permission.
17 |
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
29 |
--------------------------------------------------------------------------------
/examples/Scales.elm:
--------------------------------------------------------------------------------
1 | module Main (..) where
2 |
3 | import Graphics.Element exposing (Element, show, flow, down, leftAligned, right, middle, container)
4 | import Graphics.Input exposing (checkbox, button)
5 | import Text
6 | import WebAudio exposing (OscillatorNode, AudioContext(DefaultContext), stopOscillator, setValue, startOscillator, getDestinationNode, connectNodes, OscillatorWaveType(..), createOscillatorNode)
7 | import Signal
8 | import Time exposing (Time)
9 |
10 |
11 | -- Models
12 |
13 |
14 | type alias HalfStep =
15 | Float
16 |
17 |
18 | type alias Label =
19 | String
20 |
21 |
22 | type Tonic
23 | = Tonic Label HalfStep
24 |
25 |
26 | type Scale
27 | = Scale String (List HalfStep)
28 |
29 |
30 | type MusicState
31 | = Playing OscillatorNode HalfStep (List HalfStep) Time
32 | | Paused
33 |
34 |
35 | tonics =
36 | [ Tonic "A" 0
37 | , Tonic "A#/Bb" 1
38 | , Tonic "B" 2
39 | , Tonic "C" 3
40 | , Tonic "C#/Db" 4
41 | , Tonic "D" 5
42 | , Tonic "D#/Eb" 6
43 | , Tonic "E" 7
44 | , Tonic "F" 8
45 | , Tonic "F#/Gb" 9
46 | , Tonic "G" 10
47 | , Tonic "G#/Ab" 11
48 | ]
49 |
50 |
51 | musicalScales =
52 | [ Scale "Major" [ 0, 2, 2, 1, 2, 2, 2, 1 ]
53 | , Scale "Minor" [ 0, 2, 1, 2, 2, 1, 2, 2 ]
54 | ]
55 |
56 |
57 | headOrDie lst err =
58 | let
59 | x =
60 | List.head lst
61 | in
62 | case x of
63 | Just x' ->
64 | x'
65 |
66 | Nothing ->
67 | Debug.crash err
68 |
69 |
70 | visualModel =
71 | { tonic = headOrDie tonics "No tonics defined"
72 | , scale = headOrDie musicalScales "No scales defined"
73 | }
74 |
75 |
76 | musicModel =
77 | { state = Paused, changed = False, buttonCount = 0 }
78 |
79 |
80 | getTonicLabel (Tonic label _) =
81 | label
82 |
83 |
84 | getTonicHalfStep (Tonic _ halfstep) =
85 | halfstep
86 |
87 |
88 | getScaleLabel (Scale label _) =
89 | label
90 |
91 |
92 | getScaleSteps (Scale _ steps) =
93 | steps
94 |
95 |
96 | isPlaying state =
97 | case state of
98 | Paused ->
99 | False
100 |
101 | _ ->
102 | True
103 |
104 |
105 |
106 | -- Update
107 |
108 |
109 | stopMusic oscillator =
110 | let
111 | _ =
112 | stopOscillator 0.0 oscillator
113 | in
114 | Paused
115 |
116 |
117 | updateNote oscillator tonic tones t =
118 | case tones of
119 | tone :: remainingTones ->
120 | let
121 | currentStep =
122 | tonic + tone
123 |
124 | frequency =
125 | (220 * (2 ^ (currentStep / 12)))
126 |
127 | _ =
128 | setValue frequency oscillator.frequency
129 | in
130 | Playing oscillator currentStep remainingTones t
131 |
132 | [] ->
133 | stopMusic oscillator
134 |
135 |
136 | startMusic { tonic, scale } t =
137 | let
138 | node =
139 | createOscillatorNode DefaultContext Sine
140 | |> connectNodes (getDestinationNode DefaultContext) 0 0
141 | |> startOscillator 0.0
142 | in
143 | updateNote node (getTonicHalfStep tonic) (getScaleSteps scale) t
144 |
145 |
146 | updateMusic ( vmodel, btncnt, t ) mmodel =
147 | case mmodel.state of
148 | Playing node tonicStep steps oldt ->
149 | if btncnt /= mmodel.buttonCount then
150 | { state = stopMusic node, changed = True, buttonCount = btncnt }
151 | else
152 | let
153 | newState =
154 | updateNote node tonicStep steps t
155 | in
156 | { state = newState, changed = not <| isPlaying newState, buttonCount = btncnt }
157 |
158 | Paused ->
159 | if btncnt /= mmodel.buttonCount then
160 | { state = startMusic vmodel t, changed = True, buttonCount = btncnt }
161 | else
162 | { mmodel | changed = False }
163 |
164 |
165 |
166 | -- Input
167 |
168 |
169 | tonicInput : Signal.Mailbox Tonic
170 | tonicInput =
171 | Signal.mailbox visualModel.tonic
172 |
173 |
174 | scaleInput : Signal.Mailbox Scale
175 | scaleInput =
176 | Signal.mailbox visualModel.scale
177 |
178 |
179 | playInput =
180 | Signal.mailbox ()
181 |
182 |
183 | playCount : Signal Int
184 | playCount =
185 | Signal.foldp (\_ total -> total + 1) 0 playInput.signal
186 |
187 |
188 | deadLetter =
189 | Signal.mailbox ()
190 |
191 |
192 | visualSignal : Signal { scale : Scale, tonic : Tonic }
193 | visualSignal =
194 | Signal.map2 (\t s -> { tonic = t, scale = s }) tonicInput.signal scaleInput.signal
195 |
196 |
197 | musicSignal =
198 | Signal.map3 (,,) visualSignal (playCount) (Time.every 350.0)
199 |
200 |
201 |
202 | -- Render
203 |
204 |
205 | checkboxWithLabel : String -> (Bool -> Signal.Message) -> Bool -> Element
206 | checkboxWithLabel label handler checked =
207 | container 70 30 middle
208 | <| flow
209 | right
210 | [ checkbox handler checked
211 | , leftAligned (Text.monospace (Text.fromString label))
212 | ]
213 |
214 |
215 | onOff : Signal.Address a -> a -> Bool -> Signal.Message
216 | onOff addr toSend checked =
217 | if checked then
218 | Signal.message addr toSend
219 | else
220 | Signal.message deadLetter.address ()
221 |
222 |
223 | tonicBoxes tonic =
224 | let
225 | box t =
226 | checkboxWithLabel (getTonicLabel t) (onOff tonicInput.address t) (t == tonic)
227 | in
228 | flow right <| List.map box tonics
229 |
230 |
231 | scaleBoxes scale =
232 | let
233 | box s =
234 | checkboxWithLabel (getScaleLabel s) (onOff scaleInput.address s) (s == scale)
235 | in
236 | flow right <| List.map box musicalScales
237 |
238 |
239 | playButton vmodel mmodel =
240 | button
241 | (Signal.message playInput.address ())
242 | (if (isPlaying mmodel.state) then
243 | "Stop"
244 | else
245 | "Play"
246 | )
247 |
248 |
249 | render ( vmodel, mmodel ) =
250 | flow
251 | down
252 | [ tonicBoxes vmodel.tonic
253 | , scaleBoxes vmodel.scale
254 | , playButton vmodel mmodel
255 | ]
256 |
257 |
258 |
259 | -- MAIN
260 |
261 |
262 | mainMusic =
263 | Signal.foldp updateMusic musicModel musicSignal |> Signal.filter (\m -> m.changed || (isPlaying m.state)) musicModel
264 |
265 |
266 | main =
267 | Signal.map render (Signal.map2 (,) visualSignal mainMusic)
268 |
--------------------------------------------------------------------------------
/examples/Visual.elm:
--------------------------------------------------------------------------------
1 | module Visual (..) where
2 |
3 | import Color exposing (rgb, black, darkRed, lightRed, orange)
4 | import Debug exposing (crash)
5 | import Graphics.Collage exposing (..)
6 | import Graphics.Element exposing (..)
7 | import Graphics.Input exposing (button)
8 | import Graphics.Input.Field exposing (Content, Direction(..), Selection, defaultStyle, field, noContent)
9 | import Maybe exposing (withDefault)
10 | import Mouse
11 | import Signal
12 | import Time
13 | import Transform2D exposing (matrix, translation)
14 | import WebAudio exposing (..)
15 | import Window
16 |
17 |
18 | doOrDie f lst err =
19 | let
20 | x =
21 | f lst
22 | in
23 | case x of
24 | Just x' ->
25 | x'
26 |
27 | Nothing ->
28 | crash err
29 |
30 |
31 | headOrDie =
32 | doOrDie List.head
33 |
34 |
35 | tailOrDie =
36 | doOrDie List.tail
37 |
38 |
39 |
40 | -- Models
41 |
42 |
43 | analyser =
44 | createAnalyserNode DefaultContext |> connectNodes (getDestinationNode DefaultContext) 0 0
45 |
46 |
47 | filters =
48 | let
49 | frequencies =
50 | [ 31.25, 62.5, 125, 250, 500, 1000, 2000, 4000, 8000, 16000 ]
51 |
52 | makeFilter f =
53 | createBiquadFilterNode DefaultContext
54 | |> setFilterType Peaking
55 | |> tapNode .frequency (\freq -> setValue f freq)
56 |
57 | rlst =
58 | List.map makeFilter (List.reverse frequencies)
59 |
60 | end =
61 | headOrDie rlst "no filters"
62 | |> setFilterType HighShelf
63 | |> connectNodes analyser 0 0
64 |
65 | lst =
66 | List.reverse <| List.scanl (\c p -> connectNodes p 0 0 c) end (tailOrDie rlst "no filters")
67 | in
68 | case lst of
69 | x :: xs ->
70 | (setFilterType LowShelf x) :: xs
71 |
72 | [] ->
73 | []
74 |
75 |
76 | mediaStream =
77 | createHiddenMediaElementAudioSourceNode DefaultContext
78 | |> setMediaElementIsLooping True
79 | |> connectNodes (headOrDie filters "no filters") 0 0
80 |
81 |
82 | sliderSize =
83 | { w = 20.0, h = 100.0 }
84 |
85 |
86 | slidersState =
87 | { dimensions = ( 0, 0 )
88 | , dragging = False
89 | , lastPosition = ( 0, 0 )
90 | , selected = Nothing
91 | , scale = 1.0
92 | , move = 0.0
93 | , changes = False
94 | }
95 |
96 |
97 | isSelectedSlider idx state =
98 | withDefault False <| Maybe.map (\( i, _ ) -> i == idx) state.selected
99 |
100 |
101 | sliderValueClamp =
102 | (max -40.0) << (min 40.0)
103 |
104 |
105 | controlState =
106 | { playing = False
107 | , loadTrack = False
108 | , url = Content "https://soundcloud.com/failemotions/gravity-instrumental" (Selection 0 0 Forward)
109 | , btnCnt = 0
110 | }
111 |
112 |
113 |
114 | -- Update
115 |
116 |
117 | scaleToFit { w, h } desiredW desiredH =
118 | min (desiredW / w) (desiredH / h)
119 |
120 |
121 | updateSlidersVisual ( w', h' ) isdown pos state =
122 | let
123 | ( w, h ) =
124 | ( toFloat w', toFloat h' )
125 |
126 | sliderScale =
127 | scaleToFit sliderSize (w / 10.0 * 0.9) (h / 2.0 * 0.9)
128 |
129 | sliderMove =
130 | sliderScale * sliderSize.w / 0.9
131 |
132 | updates =
133 | updateSliders ( ( w', h' ), isdown, pos ) { state | dimensions = ( w', h' ), scale = sliderScale, move = sliderMove }
134 | in
135 | { updates | changes = True }
136 |
137 |
138 | handleHitTest x y s f =
139 | let
140 | handleLeft =
141 | (0 - sliderSize.w / 2.0) * s
142 |
143 | handleRight =
144 | handleLeft + sliderSize.w * s
145 |
146 | posy =
147 | getValue f.gain
148 |
149 | handleBottom =
150 | (posy - sliderSize.w / 2.0) * s
151 |
152 | handleTop =
153 | handleBottom + sliderSize.w * s
154 | in
155 | handleLeft <= x && x <= handleRight && handleBottom <= y && y <= handleTop
156 |
157 |
158 | selectSlider ( w', h' ) ( x', y' ) state =
159 | let
160 | ( w, h ) =
161 | ( toFloat w', toFloat h' )
162 |
163 | x =
164 | toFloat x' - w / 2.0 + state.move * 5.0
165 |
166 | y =
167 | h / 4.0 - toFloat y'
168 |
169 | lst =
170 | List.indexedMap (,) filters
171 |
172 | filtered =
173 | List.filter (\( i, f ) -> handleHitTest (x - (toFloat i + 0.5) * state.move) y state.scale f) lst
174 |
175 | selected =
176 | List.head filtered
177 | in
178 | updateSelectedSlider ( x', y' ) { state | selected = selected, dragging = True }
179 |
180 |
181 | updateSelectedSlider pos state =
182 | case state.selected of
183 | Just ( _, slider ) ->
184 | let
185 | currentVal =
186 | getValue slider.gain
187 |
188 | delta =
189 | (toFloat (snd state.lastPosition - snd pos)) / state.scale
190 |
191 | newVal =
192 | sliderValueClamp (currentVal + delta)
193 |
194 | _ =
195 | setValue newVal slider.gain
196 | in
197 | { state | lastPosition = pos, changes = True }
198 |
199 | Nothing ->
200 | { state | lastPosition = pos, changes = False }
201 |
202 |
203 | disableSelectedSlider pos state =
204 | if state.dragging then
205 | { state | lastPosition = pos, selected = Nothing, dragging = False, changes = True }
206 | else
207 | { state | lastPosition = pos, changes = False }
208 |
209 |
210 | updateSliders ( dim, isdown, pos ) state =
211 | if dim /= state.dimensions then
212 | updateSlidersVisual dim isdown pos state
213 | else if isdown then
214 | (if state.dragging then
215 | (updateSelectedSlider pos state)
216 | else
217 | (selectSlider dim pos state)
218 | )
219 | else
220 | disableSelectedSlider pos state
221 |
222 |
223 | updateTrack =
224 | playMediaElement << withDefault mediaStream << Maybe.map (\url -> setMediaElementSource url mediaStream)
225 |
226 |
227 | pauseMusic state =
228 | let
229 | _ =
230 | pauseMediaElement mediaStream
231 | in
232 | { state | playing = False, loadTrack = False }
233 |
234 |
235 | playMusic state =
236 | { state | loadTrack = True, playing = True }
237 |
238 |
239 | toggleMusic state =
240 | if state.playing then
241 | pauseMusic state
242 | else
243 | playMusic state
244 |
245 |
246 | updateControls ( cnt, url ) state =
247 | if (cnt /= state.btnCnt) then
248 | toggleMusic { state | btnCnt = cnt, url = url }
249 | else
250 | { state | url = url, loadTrack = False }
251 |
252 |
253 |
254 | -- Input
255 |
256 |
257 | slidersInput =
258 | Signal.map3
259 | (,,)
260 | Window.dimensions
261 | Mouse.isDown
262 | Mouse.position
263 |
264 |
265 | playButtonInput =
266 | Signal.mailbox controlState.playing
267 |
268 |
269 | playButtonCount =
270 | Signal.foldp (\_ total -> total + 1) 0 playButtonInput.signal
271 |
272 |
273 | urlFieldInput =
274 | Signal.mailbox controlState.url
275 |
276 |
277 | controlInput =
278 | Signal.map2
279 | (,)
280 | playButtonCount
281 | urlFieldInput.signal
282 |
283 |
284 | port soundUrl : Signal (Maybe String)
285 |
286 |
287 |
288 | -- Render
289 |
290 |
291 | renderSlider val selected =
292 | let
293 | handleColor =
294 | if selected then
295 | lightRed
296 | else
297 | darkRed
298 | in
299 | [ rect (sliderSize.w / 4.0) (sliderSize.h - sliderSize.w) |> filled black
300 | , rect sliderSize.w sliderSize.w |> filled handleColor |> moveY val
301 | ]
302 |
303 |
304 | renderSliders w h state =
305 | let
306 | slider idx filter =
307 | let
308 | x =
309 | (toFloat idx + 0.5) * state.move
310 |
311 | val =
312 | getValue filter.gain
313 |
314 | selected =
315 | isSelectedSlider idx state
316 | in
317 | groupTransform (matrix state.scale 0 0 state.scale x 0) (renderSlider val selected)
318 | in
319 | groupTransform (translation (0 - state.move * 5.0) (h / 2.0)) <| List.indexedMap slider filters
320 |
321 |
322 | renderControls w state =
323 | let
324 | btn =
325 | button
326 | (Signal.message playButtonInput.address (not state.playing))
327 | (if state.playing then
328 | "Pause"
329 | else
330 | "Play"
331 | )
332 |
333 | url =
334 | field defaultStyle (Signal.message urlFieldInput.address) "SoundCloud Permalink URL" state.url |> width (round (w / 2) - widthOf btn)
335 | in
336 | beside btn url |> toForm
337 |
338 |
339 | renderAnalyser w h freqdata =
340 | let
341 | barWidth =
342 | w / (toFloat << List.length) freqdata
343 |
344 | draw idx datum =
345 | let
346 | barHeight =
347 | h * toFloat datum / 255.0
348 | in
349 | rect barWidth barHeight |> filled orange |> move ( (toFloat idx + 0.5) * barWidth, (barHeight - h) / 2.0 )
350 | in
351 | groupTransform (translation (0 - w / 2.0) (h / -2.0)) <| List.indexedMap draw freqdata
352 |
353 |
354 | render ( w', h' ) sliderState controlState freqdata _ =
355 | let
356 | ( w, h ) =
357 | ( toFloat w', toFloat h' )
358 |
359 | halfw =
360 | w / 2.0
361 |
362 | halfh =
363 | h / 2.0
364 |
365 | quarterh =
366 | halfh / 2.0
367 | in
368 | collage
369 | w'
370 | h'
371 | [ rect w halfh |> filled (rgb 34 34 34) |> moveY quarterh
372 | , rect w halfh |> filled black |> moveY (0 - quarterh)
373 | , renderSliders w halfh sliderState
374 | , renderAnalyser w halfh freqdata
375 | , renderControls w controlState
376 | ]
377 |
378 |
379 |
380 | -- Main
381 |
382 |
383 | mainMediaStream =
384 | Signal.map updateTrack soundUrl
385 |
386 |
387 | mainSliders =
388 | Signal.foldp updateSliders slidersState slidersInput |> Signal.filter (\m -> m.changes) slidersState
389 |
390 |
391 | mainControls =
392 | Signal.foldp updateControls controlState controlInput
393 |
394 |
395 | port fetchSoundUrl : Signal String
396 | port fetchSoundUrl =
397 | Signal.map (\{ url } -> url.string) (Signal.filter (\{ loadTrack } -> loadTrack) controlState mainControls)
398 |
399 |
400 | main =
401 | Signal.map5
402 | render
403 | Window.dimensions
404 | (mainSliders)
405 | (mainControls)
406 | ((Signal.map (\_ -> getByteFrequencyData analyser) (Time.every 50.0)))
407 | (mainMediaStream)
408 |
--------------------------------------------------------------------------------
/Native/WebAudio.js:
--------------------------------------------------------------------------------
1 | var _trotha01$elm_webaudio$Native_WebAudio = function() {
2 |
  3 | var values = {};
    |
    |   // Assumed fix: fromArray/toArray are used throughout this file but are not
    |   // defined anywhere; bind them to elm-lang/core's Native List helpers.
    |   var fromArray = _elm_lang$core$Native_List.fromArray;
    |   var toArray = _elm_lang$core$Native_List.toArray;
4 |
5 | /* AudioContext */
6 | function createStandardContext() {
7 | return new (window.AudioContext || window.webkitAudioContext)();
8 | }
9 |
10 | function createAudioContext(context) {
11 | return {ctor: "AudioContext", _context: context};
12 | }
13 |
14 | values.createContext = function() {
15 | return createAudioContext(createStandardContext());
16 | };
17 |
18 | var defaultContext = null;
19 | function extractContext(context) {
20 | if (context.ctor === "DefaultContext")
21 | return defaultContext || (defaultContext = createStandardContext());
22 | return context._context;
23 | }
24 |
25 | values.getSampleRate = function(context) {
26 | return extractContext(context).sampleRate;
27 | };
28 |
29 | values.getCurrentTime = function(context) {
30 | return extractContext(context).currentTime;
31 | };
32 |
33 | values.createOfflineContext = F3(function(channels, length, sampleRate) {
34 | var context = new (window.OfflineAudioContext || window.webkitOfflineAudioContext)(channels, length, sampleRate);
35 | var signal = Signal.constant(Maybe.Nothing);
36 | context.oncomplete = function(e) {
37 | elm.notify(signal.id, Maybe.Just(values.createAudioBuffer(e.renderedBuffer)));
38 | };
39 | return {_:{}, _context: createAudioContext(context), _signal: signal};
40 | });
41 |
42 | values.startOfflineRendering = function(offlineContext) {
43 | offlineContext._context._context.startRendering();
44 | return offlineContext;
45 | };
46 |
47 |
48 |
49 | /* AudioParam */
50 | values.setValue = F2(function(val, param) {
51 | param._node._node[param._0].value = val;
52 | return param;
53 | });
54 |
55 | values.getValue = function(param) {
56 | return param._node._node[param._0].value;
57 | };
58 |
59 | values.setValueAtTime = F3(function(value, time, param) {
60 | param._node._node[param._0].setValueAtTime(value, time);
61 | return param;
62 | });
63 |
64 | values.linearRampToValue = F3(function(value, time, param) {
65 | param._node._node[param._0].linearRampToValueAtTime(value, time);
66 | return param;
67 | });
68 |
69 | values.exponentialRampToValue = F3(function(value, time, param) {
70 | param._node._node[param._0].exponentialRampToValueAtTime(value, time);
71 | return param;
72 | });
73 |
74 | values.setTargetAtTime = F4(function(target, starttime, constant, param) {
75 | param._node._node[param._0].setTargetAtTime(target, starttime, constant);
76 | return param;
77 | });
78 |
79 | values.setValueCurveAtTime = F4(function(curve, starttime, duration, param) {
80 | param._node._node[param._0].setValueCurveAtTime(toArray(curve), starttime, duration);
81 | return param;
82 | });
83 |
84 | values.cancelScheduledValues = F2(function(time, param) {
85 | param._node._node[param._0].cancelScheduledValues(time);
86 | return param;
87 | });
88 |
89 |
90 |
91 | /* AudioBuffer */
92 | values.createAudioBuffer = function(buffer) {
93 | return {ctor: "AudioBuffer", _buffer: buffer};
94 | };
95 |
96 | values.loadAudioBufferFromUrl = F2(function(context, url) {
97 | var signal = Signal.constant(Maybe.Nothing);
98 | var request = new XMLHttpRequest();
99 | request.open('GET', url, true);
100 | request.responseType = 'arraybuffer';
101 | request.onload = function() {
102 | extractContext(context).decodeAudioData(request.response, function(buffer) {
103 | elm.notify(signal.id, Maybe.Just(values.createAudioBuffer(buffer)));
104 | });
105 | };
106 | request.send();
107 | return signal;
108 | });
109 |
110 | values.getBufferSampleRate = function(buffer) {
111 | return buffer._buffer.sampleRate;
112 | };
113 |
114 | values.getBufferLength = function(buffer) {
115 | return buffer._buffer.length;
116 | };
117 |
118 | values.getBufferDuration = function(buffer) {
119 | return buffer._buffer.duration;
120 | };
121 |
122 | values.getBufferNumberOfChannels = function(buffer) {
123 | return buffer._buffer.numberOfChannels;
124 | };
125 |
126 | values.getChannelData = F2(function(channel, buffer) {
127 | return fromArray(buffer._buffer.getChannelData(channel));
128 | });
129 |
130 | values.getChannelDataSlice = F4(function(channel, start, length, buffer) {
131 | if (!buffer._slice || buffer._slice.length != length)
132 | buffer._slice = new Float32Array(length);
133 | buffer._buffer.copyFromChannel(buffer._slice, channel, start);
134 |     return fromArray(buffer._slice);  // return the slice that was just copied, not the raw buffer
135 | });
136 |
137 | values.setChannelDataSlice = F4(function(channel, start, data, buffer) {
138 | buffer._buffer.copyToChannel(toArray(data), channel, start);
139 | return buffer;
140 | });
141 |
142 |
143 |
144 | /* Audio Node Utility Functions*/
145 | function buildAudioNode(node) {
146 | return {_:{}, inputs:node.numberOfInputs, outputs:node.numberOfOutputs, _node:node};
147 | }
148 |
149 | function buildAudioParam(externalName, internalName, node) {
150 | node[externalName] = {ctor: "AudioParam", _0: internalName, _node: node};
151 | }
152 |
153 | function buildGetter(externalName, internalName) {
154 | values[externalName] = function(node) {
155 | return node._node[internalName];
156 | };
157 | }
158 |
159 | function buildSetter(externalName, internalName) {
160 | values[externalName] = F2(function(value, node) {
161 | node._node[internalName] = value;
162 | return node;
163 | });
164 | }
165 |
166 | function buildProperty(externalName, internalName) {
167 | buildGetter('get' + externalName, internalName);
168 | buildSetter('set' + externalName, internalName);
169 | }
170 |
171 |
172 |
173 | /* Audio Node */
174 | values.connectNodes = F4(function(destination, outputIdx, inputIdx, source) {
175 | source._node.connect(destination._node, outputIdx, inputIdx);
176 | return source;
177 | });
178 |
179 | values.connectToParam = F3(function(destination, outputIdx, source) {
180 |     source._node.connect(destination._node._node[destination._0], outputIdx);  // resolve the underlying AudioParam object
181 | return source;
182 | });
183 |
184 | buildProperty('ChannelCount', 'channelCount');
185 |
186 | values.getChannelCountMode = function(node) {
187 | switch (node._node.channelCountMode) {
188 | case "max":
189 | return elm.WebAudio.values.Max;
190 | case "clamped-max":
191 | return elm.WebAudio.values.ClampedMax;
192 | case "explicit":
193 | return elm.WebAudio.values.Explicit;
194 | }
195 | };
196 |
197 | values.setChannelCountMode = F2(function(mode, node) {
198 | switch (mode.ctor) {
199 | case "Max":
200 | node._node.channelCountMode = "max";
201 | break;
202 | case "ClampedMax":
203 | node._node.channelCountMode = "clamped-max";
204 | break;
205 | case "Explicit":
206 | node._node.channelCountMode = "explicit";
207 | break;
208 | }
209 | return node;
210 | });
211 |
212 | values.getChannelInterpretation = function(node) {
213 | switch (node._node.channelInterpretation) {
214 | case "speakers":
215 | return elm.WebAudio.values.Speakers;
216 | case "discrete":
217 | return elm.WebAudio.values.Discrete;
218 | }
219 | };
220 |
221 | values.setChannelInterpretation = F2(function(mode, node) {
222 | switch (mode.ctor) {
223 | case "Speakers":
224 | node._node.channelInterpretation = "speakers";
225 | break;
226 | case "Discrete":
227 | node._node.channelInterpretation = "discrete";
228 | break;
229 | }
230 | return node;
231 | });
232 |
233 |
234 |
235 | /* Analyser Node */
236 | values.createAnalyserNode = function(context) {
237 | var node = extractContext(context).createAnalyser();
238 | return buildAudioNode(node);
239 | };
240 |
241 | buildProperty('FFTSize', 'fftSize');
242 | buildProperty('MaxDecibels', 'maxDecibels');
243 | buildProperty('MinDecibels', 'minDecibels');
244 | buildProperty('SmoothingConstant', 'smoothingTimeConstant');
245 |
246 | values.getByteFrequencyData = function(node) {
247 | if (!node._bFreq || node._bFreq.length != node._node.frequencyBinCount)
248 | node._bFreq = new Uint8Array(node._node.frequencyBinCount);
249 | node._node.getByteFrequencyData(node._bFreq);
250 | return fromArray(node._bFreq);
251 | };
252 |
253 | values.getByteTimeDomainData = function(node) {
254 | if (!node._bTime || node._bTime.length != node._node.fftSize)
255 | node._bTime = new Uint8Array(node._node.fftSize);
256 | node._node.getByteTimeDomainData(node._bTime);
257 | return fromArray(node._bTime);
258 | };
259 |
260 | values.getFloatFrequencyData = function(node) {
261 | if (!node._fFreq || node._fFreq.length != node._node.frequencyBinCount)
262 | node._fFreq = new Float32Array(node._node.frequencyBinCount);
263 | node._node.getFloatFrequencyData(node._fFreq);
264 | return fromArray(node._fFreq);
265 | };
266 |
267 | values.getFloatTimeDomainData = function(node) {
268 | if (!node._fTime || node._fTime.length != node._node.fftSize)
269 | node._fTime = new Float32Array(node._node.fftSize);
270 | node._node.getFloatTimeDomainData(node._fTime);
271 | return fromArray(node._fTime);
272 | };
273 |
274 |
275 |
276 | /* Audio Buffer Source Node */
277 | values.createAudioBufferSourceNode = function(context) {
278 | var node = extractContext(context).createBufferSource();
279 | var ret = buildAudioNode(node);
280 | buildAudioParam('playbackRate', 'playbackRate', ret);
281 |
282 | var signal = Signal.constant(false);
283 | ret._ended = signal;
284 | node.onended = function() {
285 | elm.notify(signal.id, true);
286 | };
287 |
288 | return ret;
289 | };
290 |
291 | buildGetter('AudioBufferFromNode', 'buffer');
292 | values.setAudioBufferForNode = F2(function(value, node) {
293 | node._node.buffer = value._buffer;
294 | return node;
295 | });
296 |
297 | buildProperty('AudioBufferIsLooping', 'loop');
298 | buildProperty('AudioBufferLoopStart', 'loopStart');
299 | buildProperty('AudioBufferLoopEnd', 'loopEnd');
300 |
301 | values.startAudioBufferNode = F4(function(when, offset, duration, node) {
302 | if (duration.ctor == "Nothing")
303 | node._node.start(when, offset);
304 | else
305 | node._node.start(when, offset, duration._0);
306 | return node;
307 | });
308 |
309 | values.stopAudioBufferNode = F2(function(when, node) {
310 | node._node.stop(when);
311 | return node;
312 | });
313 |
314 |
315 |
316 | /* AudioDestinationNode */
317 | values.getDestinationNode = function(context) {
318 | var node = extractContext(context).destination;
319 | return buildAudioNode(node);
320 | }
321 |
322 | buildGetter('MaxChannelCount', 'maxChannelCount');
323 |
324 |
325 |
326 | /* TODO: Audio Worker Node */
327 |
328 |
329 |
330 | /* Biquad Filter Node */
331 | values.createBiquadFilterNode = function(context) {
332 | var node = extractContext(context).createBiquadFilter();
333 | var ret = buildAudioNode(node);
334 | buildAudioParam('frequency', 'frequency', ret);
335 | buildAudioParam('detune', 'detune', ret);
336 |     buildAudioParam('q', 'Q', ret);  // the underlying DOM property on BiquadFilterNode is capital 'Q'
337 | buildAudioParam('gain', 'gain', ret);
338 | return ret;
339 | }
340 |
341 | values.getFilterType = function(node) {
342 | switch (node._node.type) {
343 | case "lowpass":
344 | return elm.WebAudio.values.LowPass;
345 | case "highpass":
346 | return elm.WebAudio.values.HighPass;
347 | case "bandpass":
348 | return elm.WebAudio.values.BandPass;
349 | case "lowshelf":
350 | return elm.WebAudio.values.LowShelf;
351 | case "highshelf":
352 | return elm.WebAudio.values.HighShelf;
353 | case "peaking":
354 | return elm.WebAudio.values.Peaking;
355 | case "notch":
356 | return elm.WebAudio.values.Notch;
357 | case "allpass":
358 | return elm.WebAudio.values.AllPass;
359 | }
360 | }
361 |
362 | values.setFilterType = F2(function(type, node) {
363 | switch (type.ctor) {
364 | case "LowPass":
365 | node._node.type = "lowpass";
366 | break;
367 | case "HighPass":
368 | node._node.type = "highpass";
369 | break;
370 | case "BandPass":
371 | node._node.type = "bandpass";
372 | break;
373 | case "LowShelf":
374 | node._node.type = "lowshelf";
375 | break;
376 | case "HighShelf":
377 | node._node.type = "highshelf";
378 | break;
379 | case "Peaking":
380 | node._node.type = "peaking";
381 | break;
382 | case "Notch":
383 | node._node.type = "notch";
384 | break;
385 | case "AllPass":
386 | node._node.type = "allpass";
387 | break;
388 | }
389 | return node;
390 | });
391 |
392 |
393 |
394 | /* ChannelMergerNode */
395 | values.createChannelMergerNode = F2(function(context, numberOfInputs) {
396 | var node = extractContext(context).createChannelMerger(numberOfInputs);
397 | return buildAudioNode(node);
398 | });
399 |
400 |
401 |
402 | /* ChannelSplitterNode */
403 | values.createChannelSplitterNode = F2(function(context, numberOfOutputs) {
404 | var node = extractContext(context).createChannelSplitter(numberOfOutputs);
405 | return buildAudioNode(node);
406 | });
407 |
408 |
409 |
410 | /* DelayNode */
411 | values.createDelayNode = F2(function(context, maxDelayTime) {
412 | var node = extractContext(context).createDelay(maxDelayTime);
413 | var ret = buildAudioNode(node);
414 | buildAudioParam('delayTime', 'delayTime', ret);
415 | return ret;
416 | });
417 |
418 |
419 |
420 | /* DynamicsCompressorNode */
421 | values.createDynamicsCompressorNode = function(context) {
422 | var node = extractContext(context).createDynamicsCompressor();
423 | var ret = buildAudioNode(node);
424 | buildAudioParam('threshold', 'threshold', ret);
425 | buildAudioParam('knee', 'knee', ret);
426 | buildAudioParam('ratio', 'ratio', ret);
427 | buildAudioParam('reduction', 'reduction', ret);
428 | buildAudioParam('attack', 'attack', ret);
429 | buildAudioParam('release', 'release', ret);
430 | return ret;
431 | };
432 |
433 |
434 |
435 | /* GainNode */
436 | values.createGainNode = function(context) {
437 | var node = extractContext(context).createGain();
438 | var ret = buildAudioNode(node);
439 | buildAudioParam('gain', 'gain', ret);
440 | return ret;
441 | };
442 |
443 |
444 |
445 | /* MediaElementAudioSourceNode */
446 | values.createHiddenMediaElementAudioSourceNode = function(context) {
447 | var element = new Audio();
448 | element.crossOrigin = "anonymous";
449 | return A2(values.createMediaElementAudioSourceNode, context, element);
450 | };
451 |
452 | values.createMediaElementAudioSourceNode = F2(function(context, element) {
453 | var node = extractContext(context).createMediaElementSource(element);
454 | var ret = buildAudioNode(node);
455 | ret._element = element;
456 | return ret;
457 | });
458 |
459 | values.getMediaElementIsLooping = function(node) {
460 | return node._element.loop;
461 | };
462 |
463 | values.setMediaElementIsLooping = F2(function(loop, node) {
464 | node._element.loop = loop;
465 | return node;
466 | });
467 |
468 | values.getMediaElementSource = function(node) {
469 | return node._element.src;
470 | };
471 |
472 | values.setMediaElementSource = F2(function(source, node) {
473 | node._element.src = source;
474 | node._element.load();
475 | return node;
476 | });
477 |
478 | values.playMediaElement = function(node) {
479 | node._element.play();
480 | return node;
481 | };
482 |
483 | values.pauseMediaElement = function(node) {
484 | node._element.pause();
485 | return node;
486 | };
487 |
488 |
489 | /* MediaStreamAudioSourceNode */
490 | values.createMediaStreamAudioSourceNode = F2(function(context, source) {
491 | var node = extractContext(context).createMediaStreamSource(source);
492 | return buildAudioNode(node);
493 | });
494 |
495 |
496 | /* OscillatorNode */
497 | function setOscillatorWaveType(type, node) {
498 | switch (type.ctor) {
499 | case "Sine":
500 | node._node.type = "sine";
501 | break;
502 | case "Square":
503 | node._node.type = "square";
504 | break;
505 | case "Sawtooth":
506 | node._node.type = "sawtooth";
507 | break;
508 | case "Triangle":
509 | node._node.type = "triangle";
510 | break;
511 | }
512 | return node;
513 | }
514 |
515 | values.createOscillatorNode = F2(function(context, type) {
516 | var node = extractContext(context).createOscillator();
517 | var ret = buildAudioNode(node);
518 | buildAudioParam('frequency', 'frequency', ret);
519 | buildAudioParam('detune', 'detune', ret);
520 | return setOscillatorWaveType(type, ret);
521 | });
522 |
523 | values.getOscillatorWaveType = function(node) {
524 | switch (node._node.type) {
525 | case "sine":
526 | return elm.WebAudio.values.Sine;
527 | case "square":
528 | return elm.WebAudio.values.Square;
529 | case "sawtooth":
530 | return elm.WebAudio.values.Sawtooth;
531 | case "triangle":
532 | return elm.WebAudio.values.Triangle;
533 | }
534 | };
535 |
536 | values.setOscillatorWaveType = F2(setOscillatorWaveType);
537 |
538 | values.startOscillator = F2(function(startTime, node) {
539 | node._node.start(startTime);
540 | return node;
541 | });
542 |
543 | values.stopOscillator = F2(function(stopTime, node) {
544 | node._node.stop(stopTime);
545 | return {ctor: '_Tuple0'};
546 | });
547 |
548 |
549 |
550 | /* PannerNode */
551 | values.createPannerNode = function(context) {
552 | var node = extractContext(context).createPanner();
553 | return buildAudioNode(node);
554 | };
555 |
556 | values.getPanningModel = function(node) {
557 | switch (node._node.panningModel) {
558 | case "equalpower":
559 | return elm.WebAudio.values.EqualPower;
560 | case "hrtf":
561 | return elm.WebAudio.values.HRTF;
562 | }
563 | };
564 |
565 | values.setPanningModel = F2(function(model, node) {
566 | switch (model.ctor) {
567 | case "EqualPower":
568 | node._node.panningModel = "equalpower";
569 | break;
570 | case "HRTF":
571 | node._node.panningModel = "hrtf";
572 | break;
573 | }
574 | return node;
575 | });
576 |
577 | values.getDistanceModel = function(node) {
578 | switch (node._node.distanceModel) {
579 | case "linear":
580 | return elm.WebAudio.values.Linear;
581 | case "inverse":
582 | return elm.WebAudio.values.Inverse;
583 | case "exponential":
584 | return elm.WebAudio.values.Exponential;
585 | }
586 | };
587 |
588 | values.setDistanceModel = F2(function(model, node) {
589 | switch (model.ctor) {
590 | case "Linear":
591 | node._node.distanceModel = "linear";
592 | break;
593 | case "Inverse":
594 | node._node.distanceModel = "inverse";
595 | break;
596 | case "Exponential":
597 | node._node.distanceModel = "exponential";
598 | break;
599 | }
600 | return node;
601 | });
602 |
603 | buildProperty('ReferenceDistance', 'refDistance');
604 | buildProperty('MaxDistance', 'maxDistance');
605 | buildProperty('RolloffFactor', 'rolloffFactor');
606 | buildProperty('ConeInnerAngle', 'coneInnerAngle');
607 | buildProperty('ConeOuterAngle', 'coneOuterAngle');
608 | buildProperty('ConeOuterGain', 'coneOuterGain');
609 |
610 | values.setPosition = F4(function(x, y, z, node) {
611 | node._node.setPosition(x, y, z);
612 | return node;
613 | });
614 |
615 | values.setOrientation = F4(function(x, y, z, node) {
616 | node._node.setOrientation(x, y, z);
617 | return node;
618 | });
619 |
620 |   // Attach setVelocity like the other setters; it was previously defined in an
621 |   // unused local function and never exposed on `values`.
622 |   values.setVelocity = F4(function(x, y, z, node) {
623 |     node._node.setVelocity(x, y, z);
624 |     return node;
625 |   });
626 |
627 | return values;
628 |
629 | // return elm.Native.WebAudio.values = values;
630 | }();
631 |
--------------------------------------------------------------------------------
/WebAudio.elm:
--------------------------------------------------------------------------------
1 | module WebAudio exposing (..)
2 |
3 | {-| A module for accessing the Web Audio API via Elm.
4 |
5 | # Getting Started
6 |
7 | First, you will need an `AudioContext`. There are two types of contexts:
8 | a standard context (which outputs to the user's audio device - speakers,
9 | headphones, etc), and an "offline" context which renders audio to a buffer.
10 | It is fairly rare that you would need more than one standard context, so
11 | this library provides a convenience context called the `DefaultContext`. Think
12 | of the `DefaultContext` as a singleton for a standard context.
13 |
14 | I highly recommend you read through the Web Audio API documentation to
15 | familiarize yourself with the concepts. You can find the documentation here:
16 | http://webaudio.github.io/web-audio-api/
17 |
18 | @docs AudioContext, createContext, createOfflineContext, getSampleRate, getCurrentTime
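
For instance, most code in this repository simply passes `DefaultContext`, but an
explicit context can be created and queried like this (a sketch):

    myContext =
        createContext ()

    rate =
        getSampleRate myContext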
19 |
20 | # Special Notes
21 |
22 | Most "set" functions take the object whose value is being set as the last
23 | parameter and return that same object, so calls can be chained with the
24 | "|>" operator.
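
For example, several setters and connections can be chained onto a freshly created
node (a sketch based on `examples/Visual.elm`):

    filter =
        createBiquadFilterNode DefaultContext
            |> setFilterType Peaking
            |> connectNodes (getDestinationNode DefaultContext) 0 0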
25 |
26 | # Audio Params
27 |
28 | Most parameters for various Audio Nodes are actually "AudioParams". These
29 | allow you to either set a constant value, or schedule changes to the value at
30 | appropriate times. All times are relative to the AudioContext's current time.
31 | If you try to schedule a change for a time that has already passed, the change
32 | will take effect immediately.
33 |
34 | The documentation below lists, as bullet points, any AudioParams a node has
35 | that you can modify. These params can be accessed using record notation. For
36 | example, a Biquad Filter Node has a "frequency" param, which can be accessed
37 | as `node.frequency`.
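
For instance, a small helper (a sketch) that sets that param to 1000 Hz:

    setFilterFrequency filter =
        setValue 1000.0 filter.frequency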
38 |
39 | @docs AudioParam, setValue, getValue, setValueAtTime, linearRampToValue, exponentialRampToValue, setTargetAtTime, setValueCurveAtTime, cancelScheduledValues
40 |
41 | # Adding missing functions
42 | @docs MediaStreamAudioSourceNode, OfflineAudioContext, startOfflineRendering
43 |
44 | # Audio Buffers
45 |
46 | An `AudioBuffer` stores audio data in memory in a PCM format with a range of
47 | -1 to 1. AudioBuffers may contain multiple channels. Typically, AudioBuffers
48 | are used for "short" audio clips (less than a minute) because the entire file
49 | must be loaded before the audio can be played. An HTMLMediaElement, such as
50 | the HTML `audio` tag handled by the MediaElementAudioSourceNode, is typically
51 | used for longer audio clips because it supports streaming.
52 |
53 | There are many ways to create AudioBuffers, some of which are beyond the scope
54 | of this library. However, this library does have a few functions available to
55 | load audio files into buffers. If you have a need to create an AudioBuffer in a
56 | way this library does not directly support, the Native library contains a
57 | function called `createAudioBuffer` that takes an AudioBuffer and returns an
58 | AudioBuffer compatible with this library.
59 |
60 | @docs AudioBuffer, getBufferSampleRate, getBufferLength, getBufferDuration, getBufferNumberOfChannels, getChannelData, getChannelDataSlice, setChannelDataSlice
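
For instance, a buffer you already hold can be summarized like this (a sketch):

    describeBuffer buffer =
        toString (getBufferNumberOfChannels buffer)
            ++ " channel(s), "
            ++ toString (getBufferDuration buffer)
            ++ " s at "
            ++ toString (getBufferSampleRate buffer)
            ++ " Hz"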
61 |
62 | # Audio Nodes
63 |
64 | Once you have your AudioContext, you can begin to build your graph of Audio
65 | Nodes.
66 |
67 | @docs AudioNode, ChannelCountMode, ChannelInterpretation, connectNodes, connectToParam, getChannelCount, setChannelCount, getChannelCountMode, setChannelCountMode, getChannelInterpretation, setChannelInterpretation, tapNode
68 |
69 | # Analyser Nodes
70 |
71 | @docs AnalyserNode, createAnalyserNode, getFFTSize, setFFTSize, getMaxDecibels, setMaxDecibels, getMinDecibels, setMinDecibels, getSmoothingConstant, setSmoothingConstant, getByteFrequencyData, getByteTimeDomainData, getFloatFrequencyData, getFloatTimeDomainData
72 |
73 | # Audio Buffer Source Nodes
74 |
75 | @docs AudioBufferSourceNode, createAudioBufferSourceNode, getAudioBufferFromNode, setAudioBufferForNode, getAudioBufferIsLooping, setAudioBufferIsLooping, getAudioBufferLoopStart, setAudioBufferLoopStart, getAudioBufferLoopEnd, setAudioBufferLoopEnd, startAudioBufferNode, stopAudioBufferNode
76 |
77 | # Audio Destination Nodes
78 |
79 | Each Audio Context has only one Audio Destination Node.
80 |
81 | @docs AudioDestinationNode, getDestinationNode, getMaxChannelCount
82 |
83 | # Audio Worker Node
84 |
85 | These nodes are currently unimplemented.
86 |
87 | # Biquad Filter Nodes
88 |
89 | Biquad Filter Nodes have the following AudioParams:
90 |
91 | * frequency
92 | * detune
93 | * q
94 | * gain
95 |
96 | @docs BiquadFilterNode, BiquadFilterType, createBiquadFilterNode, getFilterType, setFilterType
97 |
98 | # Channel Merger Nodes
99 |
100 | @docs ChannelMergerNode, createChannelMergerNode
101 |
102 | # Channel Splitter Nodes
103 |
104 | @docs ChannelSplitterNode, createChannelSplitterNode
105 |
106 | # Convolver Nodes
107 |
108 | These nodes are currently unimplemented.
109 |
110 | # Delay Nodes
111 |
112 | Delay Nodes have the following AudioParams:
113 |
114 | * delayTime
115 |
116 | @docs DelayNode, createDelayNode
117 |
118 | # Dynamics Compressor Nodes
119 |
120 | Dynamics Compressor Nodes have the following AudioParams:
121 |
122 | * threshold
123 | * knee
124 | * ratio
125 | * reduction
126 | * attack
127 | * release
128 |
129 | @docs DynamicsCompressorNode, createDynamicsCompressorNode
130 |
131 | # Gain Nodes
132 |
133 | Gain Nodes have the following AudioParams:
134 |
135 | * gain
136 |
137 | @docs GainNode, createGainNode
138 |
139 | # Media Element Audio Source Nodes
140 |
141 | Media Element Audio Source Nodes connect HTMLMediaElements to the audio graph.
142 | This is the preferred way to connect a "long" audio file to the audio graph.
143 | HTMLMediaElements are things like the HTML video or audio tags, and creating
144 | these tags is a bit beyond the scope of this library. However, this library
145 | does have a convenience method for creating a "hidden" audio tag that will not
146 | be added to the page, but will load an audio file via the HTMLMediaElement
147 | interface, thus gaining the benefits of streaming, etc.
148 |
149 | The Native library also includes a function called `createMediaElementSourceNode`
150 | that takes an instance of HTMLMediaElement (which you might get from doing a
151 | `document.getElementById()` or from creating an element with `document.createElement`)
152 | and returns a MediaElementAudioSourceNode. You could use this in your own code
153 | to create a MediaElementAudioSourceNode from an audio (or video) tag that you
154 | have created using other means.
155 |
156 | @docs MediaElementAudioSourceNode, createHiddenMediaElementAudioSourceNode, getMediaElementIsLooping, setMediaElementIsLooping, getMediaElementSource, setMediaElementSource, playMediaElement, pauseMediaElement
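
For example, a hidden element can be created, looped, wired into the graph, pointed
at a file, and played (a sketch following `examples/Visual.elm`; the URL is only a
placeholder):

    backgroundTrack =
        createHiddenMediaElementAudioSourceNode DefaultContext
            |> setMediaElementIsLooping True
            |> connectNodes (getDestinationNode DefaultContext) 0 0
            |> setMediaElementSource "https://example.com/track.mp3"
            |> playMediaElement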
157 |
158 | # Media Stream Audio Destination Nodes
159 |
160 | These nodes are currently unimplemented.
161 |
162 | # Media Stream Audio Source Nodes
163 |
164 | These nodes are currently unimplemented.
165 |
166 | # Oscillator Nodes
167 |
168 | Oscillator Nodes have the following AudioParams:
169 |
170 | * frequency
171 | * detune
172 |
173 | @docs OscillatorNode, OscillatorWaveType, createOscillatorNode, getOscillatorWaveType, setOscillatorWaveType, startOscillator, stopOscillator
174 |
175 | # Panner Nodes
176 |
177 | @docs PannerNode, PanningModel, DistanceModel, createPannerNode, getPanningModel, setPanningModel, getDistanceModel, setDistanceModel, getReferenceDistance, setReferenceDistance, getMaxDistance, setMaxDistance, getRolloffFactor, setRolloffFactor, getConeInnerAngle, setConeInnerAngle, getConeOuterAngle, setConeOuterAngle, getConeOuterGain, setConeOuterGain, setPosition, setOrientation, setVelocity
178 |
179 | # Script Processor Nodes
180 |
181 | These nodes are deprecated and therefore unimplemented; see Audio Worker Nodes instead.
182 |
183 | # Wave Shaper Nodes
184 |
185 | These nodes are currently unimplemented.
186 |
187 | -}
188 |
189 | import Native.WebAudio
190 | -- import UserMedia exposing (MediaStream)
191 |
192 |
193 | {-| The AudioContext
194 |
195 | Think of the `DefaultContext` as a global singleton. Just use the `DefaultContext`
196 | unless there's some reason you need to have more than one context.
197 | -}
198 | type AudioContext
199 | = AudioContext
200 | | DefaultContext
201 |
202 |
203 | {-| Create a new AudioContext
204 |
205 | Instead of creating a context, you can use the `DefaultContext`. The
206 | `DefaultContext` is like a singleton instance of an AudioContext and would be
207 | sufficient for most people.
208 | -}
209 | createContext : () -> AudioContext
210 | createContext =
211 | Native.WebAudio.createContext
212 |
213 |
214 | {-| Get the context's sample rate
215 | -}
216 | getSampleRate : AudioContext -> Float
217 | getSampleRate =
218 | Native.WebAudio.getSampleRate
219 |
220 |
221 | {-| Get the context's current time
222 | -}
223 | getCurrentTime : AudioContext -> Float
224 | getCurrentTime =
225 | Native.WebAudio.getCurrentTime
226 |
227 |
228 | {-| The OfflineAudioContext
229 | -}
230 | type alias OfflineAudioContext =
231 | { context : AudioContext, signal : (Maybe AudioBuffer) }
232 |
233 |
234 | {-| Create a new Offline AudioContext
235 |
236 | Parameters are: the number of channels, length of the buffer in sample frames,
237 | and the sample rate in Hz. Offline Audio Contexts return a record with two
238 | fields:
239 |
240 | * returnedValue.context is the AudioContext
241 | * returnedValue.signal is a signal that is raised when the Offline Audio
242 | Context has finished rendering audio to the AudioBuffer
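
For instance (a sketch; the offline signal plumbing is still being ported to
Elm 0.17, so treat this as illustrative only):

    offline =
        createOfflineContext 2 (5 * 44100) 44100.0

    rendering =
        startOfflineRendering offline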
243 | -}
244 | createOfflineContext : Int -> Int -> Float -> OfflineAudioContext
245 | createOfflineContext =
246 | Native.WebAudio.createOfflineContext
247 |
248 |
249 | {-| Begin rendering audio in an Offline Audio Context
250 |
251 | When rendering has finished, the context.signal `Signal` will raise.
252 | -}
253 | startOfflineRendering : OfflineAudioContext -> OfflineAudioContext
254 | startOfflineRendering =
255 | Native.WebAudio.startOfflineRendering
256 |
257 |
258 | {-| AudioParams
259 |
260 | An AudioParam is used in a lot of places to allow you to either set a static
261 | value (such as a frequency, gain, etc), or to schedule changes over time.
262 | -}
263 | type AudioParam
264 | = AudioParam String
265 |
266 |
267 | {-| Set the static value of the param
268 | -}
269 | setValue : Float -> AudioParam -> AudioParam
270 | setValue =
271 | Native.WebAudio.setValue
272 |
273 |
274 | {-| Get the current value of the param
275 | -}
276 | getValue : AudioParam -> Float
277 | getValue =
278 | Native.WebAudio.getValue
279 |
280 |
281 | {-| Schedule the AudioParam to change values at a specific time
282 | -}
283 | setValueAtTime : Float -> Float -> AudioParam -> AudioParam
284 | setValueAtTime =
285 | Native.WebAudio.setValueAtTime
286 |
287 |
288 | {-| Schedule the AudioParam to linearly ramp to a new value, finishing at the
289 | specified time.
290 | -}
291 | linearRampToValue : Float -> Float -> AudioParam -> AudioParam
292 | linearRampToValue =
293 | Native.WebAudio.linearRampToValue
294 |
295 |
296 | {-| Schedule the AudioParam to exponentially ramp to a new value, finishing at
297 | the specified time.
298 | -}
299 | exponentialRampToValue : Float -> Float -> AudioParam -> AudioParam
300 | exponentialRampToValue =
301 | Native.WebAudio.exponentialRampToValue
302 |
303 |
304 | {-| Schedule the AudioParam to exponentially approach the target, starting at
305 | the specified time. The time constant determines how quickly the value changes:
306 | the value moves roughly 63.2% of the way toward the target in the first time constant.
307 | -}
308 | setTargetAtTime : Float -> Float -> Float -> AudioParam -> AudioParam
309 | setTargetAtTime =
310 | Native.WebAudio.setTargetAtTime
311 |
312 |
313 | {-| Schedule a curve of values to start at the given time and run for the
314 | specified duration. Each of the N curve values takes effect for duration / N seconds.
315 | -}
316 | setValueCurveAtTime : List Float -> Float -> Float -> AudioParam -> AudioParam
317 | setValueCurveAtTime =
318 | Native.WebAudio.setValueCurveAtTime
319 |
320 |
321 | {-| Cancel all scheduled changes at and after the specified time.
322 | -}
323 | cancelScheduledValues : Float -> AudioParam -> AudioParam
324 | cancelScheduledValues =
325 | Native.WebAudio.cancelScheduledValues
326 |
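{- A hedged sketch of parameter automation, assuming a GainNode created with
   createGainNode and a time `now` that might come from getCurrentTime.
   setValue covers the static case; the scheduling functions above let the
   value evolve over time instead.

       fadeIn node now =
           tapNode .gain
               (\g ->
                   g
                       |> setValueAtTime 0.0 now
                       |> linearRampToValue 1.0 (now + 2.0)
               )
               node
-}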
327 |
328 | {-| AudioBuffers
329 | -}
330 | type AudioBuffer
331 | = AudioBuffer
332 |
333 |
334 | {- Load an Audio Buffer from a URL
335 | loadAudioBufferFromUrl : AudioContext -> String -> Signal (Maybe AudioBuffer)
336 | loadAudioBufferFromUrl =
337 | Native.WebAudio.loadAudioBufferFromUrl
338 | -}
339 |
340 |
341 | {-| Retrieve the sample rate of the AudioBuffer
342 | -}
343 | getBufferSampleRate : AudioBuffer -> Float
344 | getBufferSampleRate =
345 | Native.WebAudio.getBufferSampleRate
346 |
347 |
348 | {-| Get the length of the AudioBuffer in sample frames
349 | -}
350 | getBufferLength : AudioBuffer -> Int
351 | getBufferLength =
352 | Native.WebAudio.getBufferLength
353 |
354 |
355 | {-| Get the duration of the AudioBuffer in seconds
356 | -}
357 | getBufferDuration : AudioBuffer -> Float
358 | getBufferDuration =
359 | Native.WebAudio.getBufferDuration
360 |
361 |
362 | {-| Retrieve the number of channels in the AudioBuffer
363 | -}
364 | getBufferNumberOfChannels : AudioBuffer -> Int
365 | getBufferNumberOfChannels =
366 | Native.WebAudio.getBufferNumberOfChannels
367 |
368 |
369 | {-| Get the buffer's data for the specified channel as a list of samples
370 | -}
371 | getChannelData : Int -> AudioBuffer -> List Float
372 | getChannelData =
373 | Native.WebAudio.getChannelData
374 |
375 |
376 | {-| Get a slice of channel data from the buffer.
377 |
378 | This is more efficient than getting all of the channel data if you only need
379 | a small chunk of it. Parameters are:
380 |
381 | * Channel number, starting with 0
382 | * What sample frame to start with
383 | * How many frames to return
384 | * The AudioBuffer
385 | -}
386 | getChannelDataSlice : Int -> Int -> Int -> AudioBuffer -> List Float
387 | getChannelDataSlice =
388 | Native.WebAudio.getChannelDataSlice
389 |
390 |
391 | {-| Set a slice of channel data in the buffer.
392 |
393 | This method allows you to modify the channel data. Parameters are:
394 |
395 | * Channel number, starting with 0
396 | * The starting frame to modify
397 | * The new channel data
398 | * The AudioBuffer
399 | -}
400 | setChannelDataSlice : Int -> Int -> List Float -> AudioBuffer -> AudioBuffer
401 | setChannelDataSlice =
402 | Native.WebAudio.setChannelDataSlice
403 |
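{- Illustrative sketch: read a 512-frame window from channel 0, silence it,
   and write it back. `buffer` is assumed to be an AudioBuffer obtained
   elsewhere (for example from getAudioBufferFromNode).

       silencedSlice buffer =
           let
               window =
                   getChannelDataSlice 0 0 512 buffer

               silence =
                   List.map (always 0.0) window
           in
               setChannelDataSlice 0 0 silence buffer
-}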
404 |
405 | {-| AudioNodes
406 |
407 | AudioNodes make up the building blocks of your audio signal graph. There are
408 | source nodes which produce an audio stream, destination nodes which can create
409 | sound from the stream, and processing nodes which allow you to modify the
410 | stream, such as filters, delays, and effects.
411 |
412 | Audio Nodes have the following properties:
413 | * inputs: The number of inputs for this node. 0 means this is a source node.
414 | * outputs: The number of outputs for this node. 0 means this is a destination
415 | node.
416 | -}
417 | type alias AudioNode a =
418 | { a | inputs : Int, outputs : Int }
419 |
420 |
421 | {-| How channels are counted during up-mixing and down-mixing
422 | -}
423 | type ChannelCountMode
424 | = Max
425 | | ClampedMax
426 | | Explicit
427 |
428 |
429 | {-| How individual channels are treated when up-mixing and down-mixing
430 | -}
431 | type ChannelInterpretation
432 | = Speakers
433 | | Discrete
434 |
435 |
436 | {-| Connect Audio Nodes
437 |
438 | An output of node1 will be connected to an input of node2. You may specify the
439 | index of the output to connect, and the index of the input. These indexes are
440 | zero based. Fan-in and fan-out are both supported, so the output of a node can
441 | be connected to multiple nodes, and multiple nodes can be connected to a single
442 | input. This function will return node1 for chaining.
443 | -}
444 | connectNodes : AudioNode b -> Int -> Int -> AudioNode a -> AudioNode a
445 | connectNodes =
446 | Native.WebAudio.connectNodes
447 |
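{- A small sketch of building a graph with connectNodes, assuming the
   DefaultContext: an oscillator feeding a gain node feeding the speakers.
   Because connectNodes returns its last argument, each source can keep
   being piped after it is connected.

       graph =
           let
               gain =
                   createGainNode DefaultContext
                       |> connectNodes (getDestinationNode DefaultContext) 0 0
           in
               createOscillatorNode DefaultContext Sine
                   |> connectNodes gain 0 0
                   |> startOscillator 0.0
-}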
448 |
449 | {-| Connect an Audio Node to an Audio Param
450 |
451 | The signal from an AudioNode may be fed into an AudioParam to control the
452 | parameter value. You may also specify which output index to connect. The index
453 | is zero based. Fan-in and fan-out are both supported, so the output of a node
454 | can be connected to multiple AudioParams, and multiple AudioParams can be
455 | connected to a single node. This function will return the node for chaining.
456 | -}
457 | connectToParam : AudioParam -> Int -> AudioNode a -> AudioNode a
458 | connectToParam =
459 | Native.WebAudio.connectToParam
460 |
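{- A hedged sketch of feeding one node into an AudioParam: a slow oscillator
   (an LFO) driving the gain of an existing GainNode `gainNode`, assuming
   the DefaultContext. The 4 Hz rate is illustrative.

       tremolo gainNode =
           createOscillatorNode DefaultContext Sine
               |> tapNode .frequency (setValue 4.0)
               |> connectToParam (.gain gainNode) 0
               |> startOscillator 0.0
-}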
461 |
462 | {-| Get a Node's Channel Count
463 |
464 | The number of channels used when up-mixing or down-mixing inputs. The default
465 | is 2 for most nodes, but some nodes determine this based on other settings. If
466 | the node has no inputs, this setting has no effect.
467 | -}
468 | getChannelCount : AudioNode a -> Int
469 | getChannelCount =
470 | Native.WebAudio.getChannelCount
471 |
472 |
473 | {-| Set a Node's Channel Count
474 |
475 | The number of channels used when up-mixing or down-mixing inputs. The default
476 | is 2 for most nodes, but some nodes determine this based on other settings. If
477 | the node has no inputs, this setting has no effect.
478 | -}
479 | setChannelCount : Int -> AudioNode a -> AudioNode a
480 | setChannelCount =
481 | Native.WebAudio.setChannelCount
482 |
483 |
484 | {-| Get a Node's Channel Count Mode
485 | @docs ChannelCountMode
486 | -}
487 | getChannelCountMode : AudioNode a -> ChannelCountMode
488 | getChannelCountMode =
489 | Native.WebAudio.getChannelCountMode
490 |
491 |
492 | {-| Set a Node's Channel Count Mode - returns the node itself for chaining.
493 | @docs ChannelCountMode
494 | -}
495 | setChannelCountMode : ChannelCountMode -> AudioNode a -> AudioNode a
496 | setChannelCountMode =
497 | Native.WebAudio.setChannelCountMode
498 |
499 |
500 | {-| Get a Node's Channel Interpretation
501 | @docs ChannelInterpretation
502 | -}
503 | getChannelInterpretation : AudioNode a -> ChannelInterpretation
504 | getChannelInterpretation =
505 | Native.WebAudio.getChannelInterpretation
506 |
507 |
508 | {-| Set a Node's Channel Interpretation - returns the node itself for chaining.
509 | @docs ChannelInterpretation
510 | -}
511 | setChannelInterpretation : ChannelInterpretation -> AudioNode a -> AudioNode a
512 | setChannelInterpretation =
513 | Native.WebAudio.setChannelInterpretation
514 |
515 |
516 | {-| "Tap" a node
517 |
518 | This is a convenience function, making it easy to access one of the node's
519 | AudioParam properties and then return the node itself at the end so you can
520 | continue to chain more functions.
521 |
522 | For example, if "node" is an OscillatorNode:
523 |
524 | ```haskell
525 | tapNode .frequency (\f -> setValue 440.0 f) node |> startOscillator 0.0
526 | ```
527 | -}
528 | tapNode : (a -> b) -> (b -> c) -> a -> a
529 | tapNode f t n =
530 | let
531 | _ =
532 | t <| f n
533 | in
534 | n
535 |
536 |
537 | {-| Type of an AnalyserNode
538 | -}
539 | type alias AnalyserNode =
540 | AudioNode {}
541 |
542 |
543 | {-| Create an AnalyserNode
544 | -}
545 | createAnalyserNode : AudioContext -> AnalyserNode
546 | createAnalyserNode =
547 | Native.WebAudio.createAnalyserNode
548 |
549 |
550 | {-| Get the FFT Size of an Analyser Node
551 | -}
552 | getFFTSize : AnalyserNode -> Int
553 | getFFTSize =
554 | Native.WebAudio.getFFTSize
555 |
556 |
557 | {-| Set the FFT Size of an Analyser Node
558 |
559 | The FFT Size must be a power of 2 between 32 and 2048. The default is 2048. This
560 | function returns the AnalyserNode for chaining.
561 | -}
562 | setFFTSize : Int -> AnalyserNode -> AnalyserNode
563 | setFFTSize =
564 | Native.WebAudio.setFFTSize
565 |
566 |
567 | {-| Get the maximum power in the scaling range of the AnalyserNode
568 | -}
569 | getMaxDecibels : AnalyserNode -> Float
570 | getMaxDecibels =
571 | Native.WebAudio.getMaxDecibels
572 |
573 |
574 | {-| Set the maximum power in the scaling range of the AnalyserNode
575 |
576 | The default is -30. This function returns the AnalyserNode for chaining.
577 | -}
578 | setMaxDecibels : Float -> AnalyserNode -> AnalyserNode
579 | setMaxDecibels =
580 | Native.WebAudio.setMaxDecibels
581 |
582 |
583 | {-| Get the minimum power in the scaling range of the AnalyserNode
584 | -}
585 | getMinDecibels : AnalyserNode -> Float
586 | getMinDecibels =
587 | Native.WebAudio.getMinDecibels
588 |
589 |
590 | {-| Set the minimum power in the scaling range of the AnalyserNode
591 |
592 | The default is -100. This function returns the AnalyserNode for chaining.
593 | -}
594 | setMinDecibels : Float -> AnalyserNode -> AnalyserNode
595 | setMinDecibels =
596 | Native.WebAudio.setMinDecibels
597 |
598 |
599 | {-| Get the smoothing constant for the AnalyserNode
600 | -}
601 | getSmoothingConstant : AnalyserNode -> Float
602 | getSmoothingConstant =
603 | Native.WebAudio.getSmoothingConstant
604 |
605 |
606 | {-| Set the smoothing constant for the AnalyserNode
607 |
608 | A value from 0 to 1, where 0 represents no averaging. Default is 0.8. This
609 | function returns the AnalyserNode for chaining.
610 | -}
611 | setSmoothingConstant : Float -> AnalyserNode -> AnalyserNode
612 | setSmoothingConstant =
613 | Native.WebAudio.setSmoothingConstant
614 |
615 |
616 | {-| Get frequency data from the AnalyserNode
617 |
618 | A value of 0 equals the minDecibels setting, and a value of 255 equals the
619 | maxDecibels setting.
620 | -}
621 | getByteFrequencyData : AnalyserNode -> List Int
622 | getByteFrequencyData =
623 | Native.WebAudio.getByteFrequencyData
624 |
625 |
626 | {-| Get time domain data from the AnalyserNode
627 |
628 | Values range from 0 to 255, with 128 corresponding to zero amplitude; the
629 | waveform is scaled from the -1..1 range rather than by the decibel settings.
630 | -}
631 | getByteTimeDomainData : AnalyserNode -> List Int
632 | getByteTimeDomainData =
633 | Native.WebAudio.getByteTimeDomainData
634 |
635 |
636 | {-| Get frequency data from the AnalyserNode
637 |
638 | Values are in the range of minDecibels to maxDecibels.
639 | -}
640 | getFloatFrequencyData : AnalyserNode -> List Float
641 | getFloatFrequencyData =
642 | Native.WebAudio.getFloatFrequencyData
643 |
644 |
645 | {-| Get time domain data from the AnalyserNode
646 |
647 | Values are the raw waveform samples, nominally in the range -1 to 1.
648 | -}
649 | getFloatTimeDomainData : AnalyserNode -> List Float
650 | getFloatTimeDomainData =
651 | Native.WebAudio.getFloatTimeDomainData
652 |
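{- A hedged sketch of a typical analyser setup, assuming the DefaultContext
   and some source node `source` that is already producing audio. The FFT
   size and smoothing constant below are illustrative values.

       analyse source =
           let
               analyser =
                   createAnalyserNode DefaultContext
                       |> setFFTSize 1024
                       |> setSmoothingConstant 0.6

               _ =
                   connectNodes analyser 0 0 source
           in
               getByteFrequencyData analyser

   The result is a List Int of 0-255 values, one per frequency bin, scaled
   between the node's minDecibels and maxDecibels settings.
-}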
653 |
654 | {-| Type of an AudioBufferSourceNode
655 | -}
656 | type alias AudioBufferSourceNode =
657 | AudioNode { playbackRate : AudioParam, ended : Bool }
658 |
659 |
660 | {-| Create an AudioBufferSourceNode
661 | -}
662 | createAudioBufferSourceNode : AudioContext -> AudioBufferSourceNode
663 | createAudioBufferSourceNode =
664 | Native.WebAudio.createAudioBufferSourceNode
665 |
666 |
667 | {-| Get the AudioBuffer associated with the AudioBufferSourceNode
668 | -}
669 | getAudioBufferFromNode : AudioBufferSourceNode -> AudioBuffer
670 | getAudioBufferFromNode =
671 | Native.WebAudio.getAudioBufferFromNode
672 |
673 |
674 | {-| Set the AudioBuffer associated with the AudioBufferSourceNode
675 | -}
676 | setAudioBufferForNode : AudioBuffer -> AudioBufferSourceNode -> AudioBufferSourceNode
677 | setAudioBufferForNode =
678 | Native.WebAudio.setAudioBufferForNode
679 |
680 |
681 | {-| Get whether or not the AudioBufferSourceNode is looping.
682 | -}
683 | getAudioBufferIsLooping : AudioBufferSourceNode -> Bool
684 | getAudioBufferIsLooping =
685 | Native.WebAudio.getAudioBufferIsLooping
686 |
687 |
688 | {-| Set whether or not the AudioBufferSourceNode should loop.
689 | -}
690 | setAudioBufferIsLooping : Bool -> AudioBufferSourceNode -> AudioBufferSourceNode
691 | setAudioBufferIsLooping =
692 | Native.WebAudio.setAudioBufferIsLooping
693 |
694 |
695 | {-| Get the starting point for looping in seconds.
696 | -}
697 | getAudioBufferLoopStart : AudioBufferSourceNode -> Float
698 | getAudioBufferLoopStart =
699 | Native.WebAudio.getAudioBufferLoopStart
700 |
701 |
702 | {-| Set the starting point for looping in seconds.
703 | -}
704 | setAudioBufferLoopStart : Float -> AudioBufferSourceNode -> AudioBufferSourceNode
705 | setAudioBufferLoopStart =
706 | Native.WebAudio.setAudioBufferLoopStart
707 |
708 |
709 | {-| Get the ending point for the looping in seconds.
710 | -}
711 | getAudioBufferLoopEnd : AudioBufferSourceNode -> Float
712 | getAudioBufferLoopEnd =
713 | Native.WebAudio.getAudioBufferLoopEnd
714 |
715 |
716 | {-| Set the ending point for the looping in seconds.
717 | -}
718 | setAudioBufferLoopEnd : Float -> AudioBufferSourceNode -> AudioBufferSourceNode
719 | setAudioBufferLoopEnd =
720 | Native.WebAudio.setAudioBufferLoopEnd
721 |
722 |
723 | {-| Start the AudioBufferSourceNode
724 |
725 | The parameters are:
726 | * The start time, relative to the context's current time
727 | * The offset into the AudioBuffer to start at, in seconds
728 | * The duration to play - if Nothing, plays until the end
729 | -}
730 | startAudioBufferNode : Float -> Float -> Maybe Float -> AudioBufferSourceNode -> AudioBufferSourceNode
731 | startAudioBufferNode =
732 | Native.WebAudio.startAudioBufferNode
733 |
734 |
735 | {-| Stop the AudioBufferSourceNode
736 |
737 | You may specify when to stop it.
738 | -}
739 | stopAudioBufferNode : Float -> AudioBufferSourceNode -> AudioBufferSourceNode
740 | stopAudioBufferNode =
741 | Native.WebAudio.stopAudioBufferNode
742 |
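{- Sketch of playing an AudioBuffer with startAudioBufferNode, assuming
   `buffer` was obtained elsewhere and the DefaultContext is in use. The
   node starts immediately, skips the first half second of the buffer, and
   plays to the end.

       playBuffer buffer =
           createAudioBufferSourceNode DefaultContext
               |> setAudioBufferForNode buffer
               |> connectNodes (getDestinationNode DefaultContext) 0 0
               |> startAudioBufferNode 0.0 0.5 Nothing
-}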
743 |
744 | {-| Type of an AudioDestinationNode
745 | -}
746 | type alias AudioDestinationNode =
747 | AudioNode {}
748 |
749 |
750 | {-| Get the AudioDestinationNode for the given context
751 |
752 | Each context has only one AudioDestinationNode.
753 | -}
754 | getDestinationNode : AudioContext -> AudioDestinationNode
755 | getDestinationNode =
756 | Native.WebAudio.getDestinationNode
757 |
758 |
759 | {-| Get the maximum number of channels
760 | -}
761 | getMaxChannelCount : AudioDestinationNode -> Int
762 | getMaxChannelCount =
763 | Native.WebAudio.getMaxChannelCount
764 |
765 |
766 |
767 | {- TODO: Type of an AudioWorkerNode -}
768 |
769 |
770 | {-| Type of a BiquadFilterNode
771 | -}
772 | type alias BiquadFilterNode =
773 | AudioNode { frequency : AudioParam, detune : AudioParam, q : AudioParam, gain : AudioParam }
774 |
775 |
776 | {-| Biquad Filter Type
777 | -}
778 | type BiquadFilterType
779 | = LowPass
780 | | HighPass
781 | | BandPass
782 | | LowShelf
783 | | HighShelf
784 | | Peaking
785 | | Notch
786 | | AllPass
787 |
788 |
789 | {-| Create a BiquadFilterNode
790 | -}
791 | createBiquadFilterNode : AudioContext -> BiquadFilterNode
792 | createBiquadFilterNode =
793 | Native.WebAudio.createBiquadFilterNode
794 |
795 |
796 | {-| Get the type of the BiquadFilterNode
797 | -}
798 | getFilterType : BiquadFilterNode -> BiquadFilterType
799 | getFilterType =
800 | Native.WebAudio.getFilterType
801 |
802 |
803 | {-| Set the type of the BiquadFilterNode
804 |
805 | The type of filter determines what the parameters mean. This function returns
806 | the BiquadFilterNode for chaining.
807 | -}
808 | setFilterType : BiquadFilterType -> BiquadFilterNode -> BiquadFilterNode
809 | setFilterType =
810 | Native.WebAudio.setFilterType
811 |
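{- A hedged sketch: a low-pass filter placed between a source node and the
   speakers, assuming the DefaultContext. The 800 Hz cutoff is illustrative.

       lowPass source =
           createBiquadFilterNode DefaultContext
               |> setFilterType LowPass
               |> tapNode .frequency (setValue 800.0)
               |> connectNodes (getDestinationNode DefaultContext) 0 0
               |> (\filter -> connectNodes filter 0 0 source)
-}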
812 |
813 | {-| Type of a ChannelMergerNode
814 | -}
815 | type alias ChannelMergerNode =
816 | AudioNode {}
817 |
818 |
819 | {-| Create a ChannelMergerNode
820 |
821 | You may specify the number of inputs as the second parameter.
822 | -}
823 | createChannelMergerNode : AudioContext -> Int -> ChannelMergerNode
824 | createChannelMergerNode =
825 | Native.WebAudio.createChannelMergerNode
826 |
827 |
828 | {-| Type of a ChannelSplitterNode
829 | -}
830 | type alias ChannelSplitterNode =
831 | AudioNode {}
832 |
833 |
834 | {-| Create a ChannelSplitterNode
835 |
836 | You may specify the number of outputs as the second parameter.
837 | -}
838 | createChannelSplitterNode : AudioContext -> Int -> ChannelSplitterNode
839 | createChannelSplitterNode =
840 | Native.WebAudio.createChannelSplitterNode
841 |
842 |
843 |
844 | {- TODO: Type of a ConvolverNode -}
845 |
846 |
847 | {-| Type of a DelayNode
848 | -}
849 | type alias DelayNode =
850 | AudioNode { delayTime : AudioParam }
851 |
852 |
853 | {-| Create a DelayNode
854 |
855 | You may specify the maximum delay time as the second parameter.
856 | -}
857 | createDelayNode : AudioContext -> Float -> DelayNode
858 | createDelayNode =
859 | Native.WebAudio.createDelayNode
860 |
861 |
862 | {-| Type of a DynamicsCompressorNode
863 | -}
864 | type alias DynamicsCompressorNode =
865 | AudioNode { threshold : AudioParam, knee : AudioParam, ratio : AudioParam, reduction : AudioParam, attack : AudioParam, release : AudioParam }
866 |
867 |
868 | {-| Create a DynamicsCompressorNode
869 | -}
870 | createDynamicsCompressorNode : AudioContext -> DynamicsCompressorNode
871 | createDynamicsCompressorNode =
872 | Native.WebAudio.createDynamicsCompressorNode
873 |
874 |
875 | {-| Type of a GainNode
876 | -}
877 | type alias GainNode =
878 | AudioNode { gain : AudioParam }
879 |
880 |
881 | {-| Create a GainNode
882 | -}
883 | createGainNode : AudioContext -> GainNode
884 | createGainNode =
885 | Native.WebAudio.createGainNode
886 |
887 |
888 | {-| Type of a MediaElementAudioSourceNode
889 | -}
890 | type alias MediaElementAudioSourceNode =
891 | AudioNode {}
892 |
893 |
894 | {-| Create a MediaElementAudioSourceNode using a hidden audio tag
895 | -}
896 | createHiddenMediaElementAudioSourceNode : AudioContext -> MediaElementAudioSourceNode
897 | createHiddenMediaElementAudioSourceNode =
898 | Native.WebAudio.createHiddenMediaElementAudioSourceNode
899 |
900 |
901 | {-| Get whether or not the MediaElementAudioSourceNode should loop
902 | -}
903 | getMediaElementIsLooping : MediaElementAudioSourceNode -> Bool
904 | getMediaElementIsLooping =
905 | Native.WebAudio.getMediaElementIsLooping
906 |
907 |
908 | {-| Set whether or not the MediaElementAudioSourceNode should loop
909 | -}
910 | setMediaElementIsLooping : Bool -> MediaElementAudioSourceNode -> MediaElementAudioSourceNode
911 | setMediaElementIsLooping =
912 | Native.WebAudio.setMediaElementIsLooping
913 |
914 |
915 | {-| Get the source of the MediaElementAudioSourceNode
916 | -}
917 | getMediaElementSource : MediaElementAudioSourceNode -> String
918 | getMediaElementSource =
919 | Native.WebAudio.getMediaElementSource
920 |
921 |
922 | {-| Set the source of the MediaElementAudioSourceNode
923 | -}
924 | setMediaElementSource : String -> MediaElementAudioSourceNode -> MediaElementAudioSourceNode
925 | setMediaElementSource =
926 | Native.WebAudio.setMediaElementSource
927 |
928 |
929 | {-| Play the MediaElementAudioSourceNode
930 | -}
931 | playMediaElement : MediaElementAudioSourceNode -> MediaElementAudioSourceNode
932 | playMediaElement =
933 | Native.WebAudio.playMediaElement
934 |
935 |
936 | {-| Pause the MediaElementAudioSourceNode
937 | -}
938 | pauseMediaElement : MediaElementAudioSourceNode -> MediaElementAudioSourceNode
939 | pauseMediaElement =
940 | Native.WebAudio.pauseMediaElement
941 |
942 |
943 | {-| Type of a MediaStreamAudioSourceNode
944 | -}
945 | type alias MediaStreamAudioSourceNode =
946 | AudioNode {}
947 |
948 |
949 | {- TODO: add documentation here
950 | createMediaStreamAudioSourceNode : AudioContext -> MediaStream -> MediaStreamAudioSourceNode
951 | createMediaStreamAudioSourceNode =
952 | Native.WebAudio.createMediaStreamAudioSourceNode
953 | -}
954 |
955 |
956 |
957 | {- TODO: Type of a MediaStreamAudioDestinationNode -}
958 |
959 |
960 | {-| Type of an OscillatorNode
961 | -}
962 | type alias OscillatorNode =
963 | AudioNode { frequency : AudioParam, detune : AudioParam }
964 |
965 |
966 | {-| Wave types for OscillatorNodes
967 |
968 | TODO: Custom
969 | -}
970 | type OscillatorWaveType
971 | = Sine
972 | | Square
973 | | Sawtooth
974 | | Triangle
975 |
976 |
977 | {-| Create an OscillatorNode
978 |
979 | The second parameter is the wave type of the oscillator.
980 | -}
981 | createOscillatorNode : AudioContext -> OscillatorWaveType -> OscillatorNode
982 | createOscillatorNode =
983 | Native.WebAudio.createOscillatorNode
984 |
985 |
986 | {-| Get the oscillator wave type
987 | -}
988 | getOscillatorWaveType : OscillatorNode -> OscillatorWaveType
989 | getOscillatorWaveType =
990 | Native.WebAudio.getOscillatorWaveType
991 |
992 |
993 | {-| Set the oscillator wave type
994 |
995 | This function returns the oscillator for chaining.
996 | -}
997 | setOscillatorWaveType : OscillatorWaveType -> OscillatorNode -> OscillatorNode
998 | setOscillatorWaveType =
999 | Native.WebAudio.setOscillatorWaveType
1000 |
1001 |
1002 | {-| Schedule the Oscillator to start
1003 |
1004 | This method returns the oscillator for chaining.
1005 | -}
1006 | startOscillator : Float -> OscillatorNode -> OscillatorNode
1007 | startOscillator =
1008 | Native.WebAudio.startOscillator
1009 |
1010 |
1011 | {-| Schedule a stop time for the Oscillator.
1012 |
1013 | WARNING:
1014 | Once a stop time has been scheduled, the oscillator can never be started
1015 | again; after it stops, the node is essentially useless. The browser is
1016 | supposed to automatically clean up AudioNodes that are no longer in use,
1017 | provided that the node meets a couple of requirements - one of which is that
1018 | there are no more references to it.
1019 | Therefore, Elm.WebAudio will automatically free the reference to the underlying
1020 | JavaScript object as soon as a stop has been scheduled. What this means, from a
1021 | practical standpoint, is that any further attempt to manipulate the Oscillator
1022 | will result in a JavaScript error. It's not pretty, but, honestly, neither is
1023 | the Web Audio JavaScript API.
1024 | -}
1025 | stopOscillator : Float -> OscillatorNode -> ()
1026 | stopOscillator =
1027 | Native.WebAudio.stopOscillator
1028 |
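{- Sketch of the full oscillator lifecycle described above, assuming the
   DefaultContext: play a 440 Hz sine for one second, then let the node go.
   After stopOscillator the node must not be touched again.

       beep =
           createOscillatorNode DefaultContext Sine
               |> tapNode .frequency (setValue 440.0)
               |> connectNodes (getDestinationNode DefaultContext) 0 0
               |> startOscillator 0.0
               |> stopOscillator 1.0
-}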
1029 |
1030 | {-| Type of a PannerNode
1031 | -}
1032 | type alias PannerNode =
1033 | AudioNode {}
1034 |
1035 |
1036 | {-| Panning Model
1037 | -}
1038 | type PanningModel
1039 | = EqualPower
1040 | | HRTF
1041 |
1042 |
1043 | {-| Distance Model
1044 | -}
1045 | type DistanceModel
1046 | = Linear
1047 | | Inverse
1048 | | Exponential
1049 |
1050 |
1051 | {-| Create a PannerNode
1052 | -}
1053 | createPannerNode : AudioContext -> PannerNode
1054 | createPannerNode =
1055 | Native.WebAudio.createPannerNode
1056 |
1057 |
1058 | {-| Get the Panning Model of the Panner
1059 | -}
1060 | getPanningModel : PannerNode -> PanningModel
1061 | getPanningModel =
1062 | Native.WebAudio.getPanningModel
1063 |
1064 |
1065 | {-| Set the Panning Model of the Panner
1066 |
1067 | This function returns the PannerNode for chaining.
1068 | -}
1069 | setPanningModel : PanningModel -> PannerNode -> PannerNode
1070 | setPanningModel =
1071 | Native.WebAudio.setPanningModel
1072 |
1073 |
1074 | {-| Get the Distance Model of the Panner
1075 | -}
1076 | getDistanceModel : PannerNode -> DistanceModel
1077 | getDistanceModel =
1078 | Native.WebAudio.getDistanceModel
1079 |
1080 |
1081 | {-| Set the Distance Model of the Panner
1082 |
1083 | This function returns the PannerNode for chaining.
1084 | -}
1085 | setDistanceModel : DistanceModel -> PannerNode -> PannerNode
1086 | setDistanceModel =
1087 | Native.WebAudio.setDistanceModel
1088 |
1089 |
1090 | {-| Get the reference distance of the panner
1091 | -}
1092 | getReferenceDistance : PannerNode -> Float
1093 | getReferenceDistance =
1094 | Native.WebAudio.getReferenceDistance
1095 |
1096 |
1097 | {-| Set the reference distance of the panner
1098 | -}
1099 | setReferenceDistance : Float -> PannerNode -> PannerNode
1100 | setReferenceDistance =
1101 | Native.WebAudio.setReferenceDistance
1102 |
1103 |
1104 | {-| Get the max distance of the panner
1105 | -}
1106 | getMaxDistance : PannerNode -> Float
1107 | getMaxDistance =
1108 | Native.WebAudio.getMaxDistance
1109 |
1110 |
1111 | {-| Set the max distance of the panner
1112 | -}
1113 | setMaxDistance : Float -> PannerNode -> PannerNode
1114 | setMaxDistance =
1115 | Native.WebAudio.setMaxDistance
1116 |
1117 |
1118 | {-| Get the rolloff factor for the panner
1119 | -}
1120 | getRolloffFactor : PannerNode -> Float
1121 | getRolloffFactor =
1122 | Native.WebAudio.getRolloffFactor
1123 |
1124 |
1125 | {-| Set the rolloff factor for the panner
1126 | -}
1127 | setRolloffFactor : Float -> PannerNode -> PannerNode
1128 | setRolloffFactor =
1129 | Native.WebAudio.setRolloffFactor
1130 |
1131 |
1132 | {-| Get the cone inner angle for the panner
1133 | -}
1134 | getConeInnerAngle : PannerNode -> Float
1135 | getConeInnerAngle =
1136 | Native.WebAudio.getConeInnerAngle
1137 |
1138 |
1139 | {-| Set the cone inner angle for the panner
1140 | -}
1141 | setConeInnerAngle : Float -> PannerNode -> PannerNode
1142 | setConeInnerAngle =
1143 | Native.WebAudio.setConeInnerAngle
1144 |
1145 |
1146 | {-| Get the cone outer angle for the panner
1147 | -}
1148 | getConeOuterAngle : PannerNode -> Float
1149 | getConeOuterAngle =
1150 | Native.WebAudio.getConeOuterAngle
1151 |
1152 |
1153 | {-| Set the cone outer angle for the panner
1154 | -}
1155 | setConeOuterAngle : Float -> PannerNode -> PannerNode
1156 | setConeOuterAngle =
1157 | Native.WebAudio.setConeOuterAngle
1158 |
1159 |
1160 | {-| Get the cone outer gain for the panner
1161 | -}
1162 | getConeOuterGain : PannerNode -> Float
1163 | getConeOuterGain =
1164 | Native.WebAudio.getConeOuterGain
1165 |
1166 |
1167 | {-| Set the cone outer gain for the panner
1168 | -}
1169 | setConeOuterGain : Float -> PannerNode -> PannerNode
1170 | setConeOuterGain =
1171 | Native.WebAudio.setConeOuterGain
1172 |
1173 |
1174 | {-| Set the position of the audio source for the panner
1175 | -}
1176 | setPosition : Float -> Float -> Float -> PannerNode -> PannerNode
1177 | setPosition =
1178 | Native.WebAudio.setPosition
1179 |
1180 |
1181 | {-| Set the orientation of the audio source for the panner
1182 | -}
1183 | setOrientation : Float -> Float -> Float -> PannerNode -> PannerNode
1184 | setOrientation =
1185 | Native.WebAudio.setOrientation
1186 |
1187 |
1188 | {-| Set the velocity of the audio source for the panner
1189 | -}
1190 | setVelocity : Float -> Float -> Float -> PannerNode -> PannerNode
1191 | setVelocity =
1192 | Native.WebAudio.setVelocity
1193 |
1194 |
1195 |
1196 | {- TODO: Type of a ScriptProcessorNode -}
1197 | {- TODO: Type of a WaveShaperNode -}
1198 |
--------------------------------------------------------------------------------