├── .npmignore
├── tests
├── core
│ ├── stream.coffee
│ ├── events.coffee
│ ├── buffer.coffee
│ ├── bufferlist.coffee
│ └── bitstream.coffee
├── config.coffee
├── decoders
│ ├── xlaw.coffee
│ ├── lpcm.coffee
│ └── shared.coffee
├── helpers.coffee
├── test.html
├── test.coffee
├── README.md
├── sources
│ ├── file.coffee
│ ├── http.coffee
│ └── buffer.coffee
├── demuxers
│ ├── au.coffee
│ ├── wave.coffee
│ ├── aiff.coffee
│ ├── shared.coffee
│ ├── caf.coffee
│ └── m4a.coffee
├── crc32.coffee
└── qunit
│ └── qunit.css
├── .gitmodules
├── .gitignore
├── src
├── filters
│ ├── volume.coffee
│ └── balance.coffee
├── filter.coffee
├── aurora.coffee
├── core
│ ├── events.coffee
│ ├── base.coffee
│ ├── bufferlist.coffee
│ ├── buffer.coffee
│ ├── bitstream.coffee
│ └── stream.coffee
├── queue.coffee
├── sources
│ ├── node
│ │ ├── file.coffee
│ │ └── http.coffee
│ ├── buffer.coffee
│ └── browser
│ │ ├── file.coffee
│ │ └── http.coffee
├── test.html
├── devices
│ ├── node-speaker.coffee
│ ├── mozilla.coffee
│ ├── webaudio.coffee
│ └── resampler.js
├── decoders
│ ├── xlaw.coffee
│ └── lpcm.coffee
├── device.coffee
├── demuxers
│ ├── au.coffee
│ ├── wave.coffee
│ ├── aiff.coffee
│ ├── caf.coffee
│ └── m4a.coffee
├── decoder.coffee
├── demuxer.coffee
├── asset.coffee
└── player.coffee
├── browser.coffee
├── package.json
├── node.coffee
└── README.md
/.npmignore:
--------------------------------------------------------------------------------
1 | src/
2 | tests/
3 | experiments/
4 | .git*
5 | .DS_Store
6 | *.coffee
7 | *.html
--------------------------------------------------------------------------------
/tests/core/stream.coffee:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabslab/aurora.js/HEAD/tests/core/stream.coffee
--------------------------------------------------------------------------------
/tests/config.coffee:
--------------------------------------------------------------------------------
1 | # set this to the base tests directory on an HTTP server
2 | HTTP_BASE = 'http://localhost:8000/'
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "tests/data"]
2 | path = tests/data
3 | url = https://github.com/audiocogs/aurora.js-test.git
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .DS_Store?
3 | ._*
4 | .Spotlight-V100
5 | .Trashes
6 | Icon?
7 | ehthumbs.db
8 | Thumbs.db
9 | node_modules
10 | node.js
11 | aurora.js
--------------------------------------------------------------------------------
/tests/decoders/xlaw.coffee:
--------------------------------------------------------------------------------
1 | #import "shared.coffee"
2 |
3 | module 'decoders/xlaw', ->
4 | decoderTest 'alaw',
5 | file: 'au/alaw.au'
6 | data: '1543ac89'
7 |
8 | decoderTest 'ulaw',
9 | file: 'm4a/ulaw.mov'
10 | data: '565b7fd'
--------------------------------------------------------------------------------
/src/filters/volume.coffee:
--------------------------------------------------------------------------------
class AV.VolumeFilter extends AV.Filter
  # Scales every sample in place by the current volume value (0-100).
  process: (buffer) ->
    # At or above 100 the multiplier is 1, so there is nothing to do.
    # Also bail out when no value is bound yet: the original fell
    # through and multiplied every sample by NaN, corrupting the buffer.
    return if not @value? or @value >= 100

    # clamp to [0, 100] and normalize to a 0..1 gain factor
    vol = Math.max(0, Math.min(100, @value)) / 100

    for i in [0...buffer.length] by 1
      buffer[i] *= vol

    return
--------------------------------------------------------------------------------
/browser.coffee:
--------------------------------------------------------------------------------
1 | do ->
2 | global = this
3 |
4 | #import "src/aurora.coffee"
5 | #import "src/sources/browser/http.coffee"
6 | #import "src/sources/browser/file.coffee"
7 | #import "src/devices/webaudio.coffee"
8 | #import "src/devices/mozilla.coffee"
9 |
10 | global.AV = AV
--------------------------------------------------------------------------------
/tests/helpers.coffee:
--------------------------------------------------------------------------------
1 | # make QUnit print test results to the console in Node
2 | if not QUnit? and require?
3 | AV = require '../node.js'
4 | QUnit = require 'qunit-cli'
5 |
6 | # setup testing environment
7 | assert = QUnit
8 | test = QUnit.test
9 | module = (name, fn) ->
10 | QUnit.module name
11 | fn()
--------------------------------------------------------------------------------
/src/filter.coffee:
--------------------------------------------------------------------------------
class AV.Filter
  # Base class for audio filters. Subclasses override `process` to
  # transform a buffer of samples in place.
  constructor: (context, key) ->
    # default constructor takes a single value
    # override to take more parameters
    if context and key
      # expose a live `value` property that re-reads context[key] on
      # every access, so the filter tracks external changes to it
      Object.defineProperty this, 'value',
        get: -> context[key]

  process: (buffer) ->
    # override this method
    return
--------------------------------------------------------------------------------
/src/filters/balance.coffee:
--------------------------------------------------------------------------------
class AV.BalanceFilter extends AV.Filter
  # Pans audio left/right by attenuating one channel.
  # NOTE(review): the loop steps by 2, which assumes stereo interleaved
  # samples (even index = one channel, odd = the other) — confirm upstream.
  process: (buffer) ->
    return if @value is 0
    # clamp the pan value to [-50, 50]
    pan = Math.max(-50, Math.min(50, @value))

    for i in [0...buffer.length] by 2
      # positive pan attenuates the even channel, negative the odd one;
      # the un-panned channel keeps a gain of exactly 1
      buffer[i] *= Math.min(1, (50 - pan) / 50)
      buffer[i + 1] *= Math.min(1, (50 + pan) / 50)

    return
--------------------------------------------------------------------------------
/tests/test.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | QUnit
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/tests/test.coffee:
--------------------------------------------------------------------------------
1 | #import "config"
2 | #import "helpers"
3 |
4 | #import "core/events"
5 | #import "core/buffer"
6 | #import "core/bufferlist"
7 | #import "core/stream"
8 | #import "core/bitstream"
9 |
10 | #import "sources/http"
11 | #import "sources/file"
12 | #import "sources/buffer"
13 |
14 | #import "demuxers/m4a"
15 | #import "demuxers/caf"
16 | #import "demuxers/aiff"
17 | #import "demuxers/wave"
18 | #import "demuxers/au"
19 |
20 | #import "decoders/lpcm"
21 | #import "decoders/xlaw"
--------------------------------------------------------------------------------
/src/aurora.coffee:
--------------------------------------------------------------------------------
1 | AV = {}
2 |
3 | #import "core/base.coffee"
4 | #import "core/buffer.coffee"
5 | #import "core/bufferlist.coffee"
6 | #import "core/stream.coffee"
7 | #import "core/bitstream.coffee"
8 | #import "core/events.coffee"
9 |
10 | #import "sources/buffer.coffee"
11 |
12 | #import "demuxer.coffee"
13 | #import "decoder.coffee"
14 | #import "queue.coffee"
15 | #import "device.coffee"
16 | #import "asset.coffee"
17 | #import "player.coffee"
18 |
19 | #import "filter.coffee"
20 | #import "filters/volume.coffee"
21 | #import "filters/balance.coffee"
22 |
23 | #import "demuxers/caf.coffee"
24 | #import "demuxers/m4a.coffee"
25 | #import "demuxers/aiff.coffee"
26 | #import "demuxers/wave.coffee"
27 | #import "demuxers/au.coffee"
28 |
29 | #import "decoders/lpcm.coffee"
30 | #import "decoders/xlaw.coffee"
31 |
32 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "av",
3 | "description": "Audio decoding framework",
4 | "version": "0.3.1",
5 | "author": {
6 | "name": "Audiocogs",
7 | "url": "http://audiocogs.org/"
8 | },
9 | "repository": {
10 | "type": "git",
11 | "url": "https://github.com/audiocogs/aurora.js.git"
12 | },
13 | "bugs": "https://github.com/audiocogs/aurora.js/issues",
14 | "dependencies": {
15 | "speaker": "~0.0.10"
16 | },
17 | "devDependencies": {
18 | "coffee-script": ">=1.0",
19 | "qunit-cli": "*",
20 | "importer": ">=0.4"
21 | },
22 | "scripts": {
23 | "prepublish": "node_modules/.bin/importer node.coffee node.js",
24 | "test": "node_modules/.bin/importer node.coffee node.js; node_modules/.bin/importer tests/test.coffee"
25 | },
26 | "engine": [ "node >= v0.6.0" ],
27 | "main": "node.js"
28 | }
29 |
--------------------------------------------------------------------------------
/src/core/events.coffee:
--------------------------------------------------------------------------------
class AV.EventEmitter extends AV.Base
  # Minimal event emitter: register handlers with on/once, remove with
  # off, and invoke them with emit. Handlers run with `this` bound to
  # the emitter.
  on: (event, fn) ->
    @events ?= {}
    (@events[event] ?= []).push(fn)

  off: (event, fn) ->
    handlers = @events?[event]
    return unless handlers
    idx = handlers.indexOf(fn)
    handlers.splice(idx, 1) unless idx is -1

  once: (event, fn) ->
    # wrap the handler so it unregisters itself on first invocation
    @on event, wrapper = ->
      @off event, wrapper
      fn.apply(this, arguments)

  emit: (event, args...) ->
    handlers = @events?[event]
    return unless handlers

    # iterate over a copy so a handler removed mid-emit
    # (as `once` does) can't disturb the iteration
    handler.apply(this, args) for handler in handlers.slice()
    return
--------------------------------------------------------------------------------
/node.coffee:
--------------------------------------------------------------------------------
1 | #import "src/aurora.coffee"
2 | #import "src/sources/node/http.coffee"
3 | #import "src/sources/node/file.coffee"
4 | #import "src/devices/node-speaker.coffee"
5 |
6 | AV.isNode = true
7 | AV.require = (modules...) ->
8 | Module = require 'module'
9 |
10 | # create a temporary reference to the AV namespace
11 | # that we can access from within the required modules
12 | key = "__AV__#{Date.now()}"
13 | Module::[key] = AV
14 |
15 | # temporarily override the module wrapper
16 | wrapper = Module.wrapper[0]
17 | Module.wrapper[0] += "var AV = module['#{key}'];"
18 |
19 | # require the modules
20 | for module in modules
21 | require module
22 |
23 | # replace the wrapper and delete the temporary AV reference
24 | Module.wrapper[0] = wrapper
25 | delete Module::[key]
26 |
27 | return
28 |
29 | module.exports = AV
--------------------------------------------------------------------------------
/tests/decoders/lpcm.coffee:
--------------------------------------------------------------------------------
1 | #import "shared.coffee"
2 |
3 | module 'decoders/lpcm', ->
4 | decoderTest 'i8',
5 | file: 'm4a/i8.mov'
6 | data: 'f12b56ad'
7 |
8 | decoderTest 'lei16',
9 | file: 'wave/lei16.wav'
10 | data: '6b6b722b'
11 |
12 | decoderTest 'bei16',
13 | file: 'aiff/bei16.aiff'
14 | data: 'ca0bae1e'
15 |
16 | decoderTest 'bei24',
17 | file: 'aiff/bei24.aiff'
18 | data: '689eecfa'
19 |
20 | decoderTest 'lei24',
21 | file: 'wave/lei24.wav'
22 | data: '5a265e8a'
23 |
24 | decoderTest 'bef32',
25 | file: 'au/bef32.au'
26 | data: '5cc026c5'
27 |
28 | decoderTest 'lef32',
29 | file: 'wave/lef32.wav'
30 | data: '9b2a9317'
31 |
32 | decoderTest 'lef64',
33 | file: 'caf/lef64.caf'
34 | data: '9a3372e'
35 |
36 | # TODO: bef64
--------------------------------------------------------------------------------
/src/queue.coffee:
--------------------------------------------------------------------------------
class AV.Queue extends AV.EventEmitter
  # Pre-buffers decoded packets from an asset, emitting 'ready' once
  # @readyMark packets are queued (or the asset ends first).
  constructor: (@asset) ->
    @readyMark = 64      # packets to queue before emitting 'ready'
    @finished = false
    @buffering = true    # true while filling up to the ready mark
    @ended = false

    @buffers = []
    @asset.on 'data', @write
    @asset.on 'end', =>
      @ended = true

    # kick off decoding; each decoded packet arrives via @write
    @asset.decodePacket()

  write: (buffer) =>
    @buffers.push buffer if buffer

    if @buffering
      if @buffers.length >= @readyMark or @ended
        @buffering = false
        @emit 'ready'
      else
        # keep pre-buffering until the ready mark is reached
        @asset.decodePacket()

  read: ->
    return null if @buffers.length is 0

    # request the next packet so the queue refills as it drains
    @asset.decodePacket()
    return @buffers.shift()

  reset: ->
    @buffers.length = 0
    @buffering = true
    @asset.decodePacket()
--------------------------------------------------------------------------------
/src/sources/node/file.coffee:
--------------------------------------------------------------------------------
class AV.FileSource extends AV.EventEmitter
  fs = require 'fs'

  # Streams a file from disk, emitting 'data' with AV.Buffer chunks,
  # plus 'progress' (percent loaded), 'end', and 'error' events.
  constructor: (@filename) ->
    @stream = null
    @loaded = 0
    @size = null

  getSize: ->
    fs.stat @filename, (err, stat) =>
      return @emit 'error', err if err

      @size = stat.size
      @start()

  start: ->
    # the file size is needed for progress reporting, so stat first;
    # getSize calls start() again once @size is known
    if not @size?
      return @getSize()

    # resume rather than re-open if a paused stream already exists
    if @stream
      return @stream.resume()

    @stream = fs.createReadStream @filename

    @stream.on 'data', (buf) =>
      @loaded += buf.length
      @emit 'progress', @loaded / @size * 100
      @emit 'data', new AV.Buffer(new Uint8Array(buf))

    @stream.on 'end', =>
      @emit 'end'

    @stream.on 'error', (err) =>
      @pause()
      @emit 'error', err

  pause: ->
    # guard: pause() before start() would otherwise crash on a null
    # stream (mirrors AV.HTTPSource's `@response?.pause()`)
    @stream?.pause()
--------------------------------------------------------------------------------
/src/sources/buffer.coffee:
--------------------------------------------------------------------------------
class AV.BufferSource extends AV.EventEmitter
  # A source backed by an in-memory buffer or AV.BufferList, delivering
  # its contents asynchronously one buffer per tick.
  constructor: (input) ->
    # Now make an AV.BufferList
    if input instanceof AV.BufferList
      @list = input

    else
      @list = new AV.BufferList
      @list.append new AV.Buffer(input)

    @paused = true

  # fall back to a 0ms timeout where setImmediate is unavailable
  setImmediate = global.setImmediate or (fn) ->
    global.setTimeout fn, 0

  clearImmediate = global.clearImmediate or (timer) ->
    global.clearTimeout timer

  start: ->
    @paused = false
    @_timer = setImmediate @loop

  # emits one buffer per tick so consumers see async-style delivery,
  # then reschedules itself until the list is exhausted
  loop: =>
    @emit 'progress', (@list.numBuffers - @list.availableBuffers + 1) / @list.numBuffers * 100 | 0
    @emit 'data', @list.first
    if @list.advance()
      setImmediate @loop
    else
      @emit 'end'

  pause: ->
    clearImmediate @_timer
    @paused = true

  reset: ->
    @pause()
    @list.rewind()
--------------------------------------------------------------------------------
/tests/decoders/shared.coffee:
--------------------------------------------------------------------------------
1 | #import "../crc32.coffee"
2 |
3 | decoderTest = (name, config) ->
4 | assert.asyncTest name, ->
5 | if AV.isNode
6 | source = new AV.FileSource "#{__dirname}/data/#{config.file}"
7 | else
8 | source = new AV.HTTPSource "#{HTTP_BASE}/data/#{config.file}"
9 |
10 | source.once 'data', (chunk) ->
11 | Demuxer = AV.Demuxer.find(chunk)
12 | demuxer = new Demuxer(source, chunk)
13 |
14 | demuxer.once 'format', (format) ->
15 | Decoder = AV.Decoder.find(format.formatID)
16 | decoder = new Decoder(demuxer, format)
17 | crc = new CRC32
18 |
19 | decoder.on 'data', (chunk) ->
20 | crc.update new AV.Buffer(new Uint8Array(chunk.buffer))
21 |
22 | decoder.on 'end', ->
23 | assert.equal crc.toHex(), config.data
24 | assert.start()
25 |
26 | do read = ->
27 | continue while decoder.decode()
28 | decoder.once 'data', read
29 |
30 | source.start()
--------------------------------------------------------------------------------
/src/test.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
36 |
37 |
--------------------------------------------------------------------------------
/src/core/base.coffee:
--------------------------------------------------------------------------------
1 | #
2 | # The Base class defines an extend method so that
3 | # CoffeeScript classes can be extended easily by
4 | # plain JavaScript. Based on http://ejohn.org/blog/simple-javascript-inheritance/.
5 | #
6 |
7 | class AV.Base
8 | fnTest = /\b_super\b/
9 |
10 | @extend: (prop) ->
11 | class Class extends this
12 |
13 | if typeof prop is 'function'
14 | keys = Object.keys Class.prototype
15 | prop.call(Class, Class)
16 |
17 | prop = {}
18 | for key, fn of Class.prototype when key not in keys
19 | prop[key] = fn
20 |
21 | _super = Class.__super__
22 |
23 | for key, fn of prop
24 | # test whether the method actually uses _super() and wrap it if so
25 | if typeof fn is 'function' and fnTest.test(fn)
26 | do (key, fn) ->
27 | Class::[key] = ->
28 | tmp = this._super
29 | this._super = _super[key]
30 |
31 | ret = fn.apply(this, arguments)
32 | this._super = tmp
33 |
34 | return ret
35 |
36 | else
37 | Class::[key] = fn
38 |
39 | return Class
--------------------------------------------------------------------------------
/tests/core/events.coffee:
--------------------------------------------------------------------------------
1 | module 'core/events', ->
2 | test 'on', ->
3 | emitter = new AV.EventEmitter
4 | times = 0
5 |
6 | emitter.on 'test', (a, b) ->
7 | times++
8 | assert.equal 'a', a
9 | assert.equal 'b', b
10 |
11 | emitter.emit 'test', 'a', 'b'
12 | emitter.emit 'test', 'a', 'b'
13 | assert.equal 2, times
14 |
15 | test 'off', ->
16 | emitter = new AV.EventEmitter
17 | times = 0
18 |
19 | emitter.on 'test', fn = ->
20 | times++
21 |
22 | emitter.emit 'test'
23 | emitter.off 'test', fn
24 | emitter.emit 'test'
25 |
26 | assert.equal 1, times
27 |
28 | test 'once', ->
29 | emitter = new AV.EventEmitter
30 | times = 0
31 |
32 | emitter.once 'test', ->
33 | times++
34 |
35 | emitter.emit 'test'
36 | emitter.emit 'test'
37 | emitter.emit 'test'
38 |
39 | assert.equal 1, times
40 |
41 | test 'emit', ->
42 | emitter = new AV.EventEmitter
43 | times = 0
44 |
45 | emitter.on 'test', ->
46 | times++
47 |
48 | emitter.on 'test', ->
49 | times++
50 |
51 | emitter.emit 'test'
52 | assert.equal 2, times
--------------------------------------------------------------------------------
/src/core/bufferlist.coffee:
--------------------------------------------------------------------------------
class AV.BufferList
  # A doubly linked list of AV.Buffer objects with a movable read
  # position (@first) and byte/buffer accounting.
  constructor: ->
    @first = null             # current read position
    @last = null
    @numBuffers = 0           # total buffers ever appended
    @availableBytes = 0       # bytes from @first through @last
    @availableBuffers = 0

  # Shallow copy: shares the underlying buffer nodes but tracks its
  # own read position and counts independently.
  copy: ->
    result = new AV.BufferList

    result.first = @first
    result.last = @last
    result.numBuffers = @numBuffers
    result.availableBytes = @availableBytes
    result.availableBuffers = @availableBuffers

    return result

  append: (buffer) ->
    buffer.prev = @last
    @last?.next = buffer
    @last = buffer
    @first ?= buffer

    @availableBytes += buffer.length
    @availableBuffers++
    @numBuffers++

  # Move the read position forward one buffer; returns whether a
  # buffer is available at the new position.
  advance: ->
    if @first
      @availableBytes -= @first.length
      @availableBuffers--
      @first = @first.next
      return @first?

    return false

  # Move the read position back one buffer. When @first is null (we
  # advanced past the end) this restarts from @last.
  rewind: ->
    if @first and not @first.prev
      return false

    @first = @first?.prev or @last
    if @first
      @availableBytes += @first.length
      @availableBuffers++

    return @first?

  # Rewind all the way back to the first buffer.
  reset: ->
    continue while @rewind()
--------------------------------------------------------------------------------
/src/sources/node/http.coffee:
--------------------------------------------------------------------------------
class AV.HTTPSource extends AV.EventEmitter
  http = require 'http'

  # Streams a URL over HTTP, emitting 'data' with AV.Buffer chunks,
  # plus 'progress' (percent loaded), 'end', and 'error' events.
  constructor: (@url) ->
    @request = null
    @response = null

    @loaded = 0
    @size = 0

  start: ->
    # resume rather than re-request if a paused response exists
    if @response?
      return @response.resume()

    @request = http.get @url
    @request.on 'response', (@response) =>
      if @response.statusCode isnt 200
        return @errorHandler 'Error loading file. HTTP status code ' + @response.statusCode

      # always pass an explicit radix to parseInt; if the header is
      # missing @size is NaN and progress is meaningless, but data
      # and end events still flow
      @size = parseInt @response.headers['content-length'], 10
      @loaded = 0

      @response.on 'data', (chunk) =>
        @loaded += chunk.length
        @emit 'progress', @loaded / @size * 100
        @emit 'data', new AV.Buffer(new Uint8Array(chunk))

      @response.on 'end', =>
        @emit 'end'

      @response.on 'error', @errorHandler

    @request.on 'error', @errorHandler

  pause: ->
    @response?.pause()

  reset: ->
    @pause()
    # guard: reset is reachable via errorHandler, where @request may
    # already have been cleared
    @request?.abort()
    @request = null
    @response = null

  errorHandler: (err) =>
    @reset()
    @emit 'error', err
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | Tests
2 | =====
3 |
4 | The tests for Aurora are written using the [QUnit](http://qunitjs.com/) testing framework. They
5 | run in both Node.js and the browser.
6 |
7 | ## Setup
8 |
9 | First, you'll need the test data, so init your git submodules to download them, and update them
10 | if you've already downloaded them before.
11 |
12 | git submodule init
13 | git submodule update
14 |
15 | Running the tests requires running an HTTP server to host both QUnit itself (for the browser),
16 | as well as the test data files as used by both the browser and Node to test HTTP loading.
17 |
18 | To start a simple static HTTP server in the tests directory, run the following command:
19 |
20 | python -m SimpleHTTPServer
21 |
22 | If you already have the test directory on an HTTP server, all you need to do is set the base URL of
23 | the "tests" folder to the `HTTP_BASE` variable in `config.coffee`.
24 |
25 | ## To run in the browser:
26 | 1. Follow the setup steps above.
27 | 2. Start servers to host Aurora and the tests themselves:
28 |
29 | importer ../browser.coffee -p 3030
30 | importer test.coffee -p 3031
31 |
32 | You may need to install `importer` using `npm install importer -g` first.
33 |
34 | 3. Open `test.html` in your browser, using the HTTP server that you set up above.
35 |
36 | ## To run in Node:
37 | 1. Follow the setup steps above.
38 | 2. Either run `importer test.coffee` or `npm test` from the root directory.
--------------------------------------------------------------------------------
/tests/sources/file.coffee:
--------------------------------------------------------------------------------
#import "../crc32.coffee"

module 'sources/file', ->
  asyncTest = assert.asyncTest

  getSource = (fn) ->
    # if we're in Node, we can read any file we like, otherwise simulate by reading
    # a blob from an XHR and loading it using a FileSource
    if AV.isNode
      fn new AV.FileSource "#{__dirname}/data/m4a/base.m4a"
    else
      xhr = new XMLHttpRequest
      xhr.open 'GET', "#{HTTP_BASE}/data/m4a/base.m4a"
      xhr.responseType = 'blob'
      xhr.send()
      xhr.onload = ->
        fn new AV.FileSource(xhr.response)

  # the concatenated chunks must checksum to the known CRC of the file
  asyncTest 'data', ->
    getSource (source) ->
      crc = new CRC32
      source.on 'data', (chunk) ->
        crc.update chunk

      source.on 'end', ->
        assert.equal crc.toHex(), '84d9f967'
        assert.start()

      source.start()

  # progress must increase monotonically and finish at exactly 100
  asyncTest 'progress', ->
    getSource (source) ->
      lastProgress = 0
      source.on 'progress', (progress) ->
        assert.ok progress > lastProgress, 'progress > lastProgress'
        assert.ok progress <= 100, 'progress <= 100'
        lastProgress = progress

      source.on 'end', ->
        assert.equal lastProgress, 100
        assert.start()

      source.start()
--------------------------------------------------------------------------------
/src/sources/browser/file.coffee:
--------------------------------------------------------------------------------
class AV.FileSource extends AV.EventEmitter
  # Reads a DOM File/Blob in chunks using FileReader, emitting 'data'
  # with AV.Buffer chunks, plus 'progress', 'end', and 'error' events.
  constructor: (@file) ->
    if not FileReader?
      return @emit 'error', 'This browser does not have FileReader support.'

    @offset = 0
    @length = @file.size
    @chunkSize = 1 << 20    # read 1MB at a time
    # pick whichever slice method this browser supports (vendor-prefixed
    # in older WebKit/Gecko); @slice holds the winning method name
    @file[@slice = 'slice'] or @file[@slice = 'webkitSlice'] or @file[@slice = 'mozSlice']

  start: ->
    # if a reader exists and no read is in flight, just continue
    if @reader
      return @loop() unless @active

    @reader = new FileReader
    @active = true

    @reader.onload = (e) =>
      buf = new AV.Buffer(new Uint8Array(e.target.result))
      @offset += buf.length

      @emit 'data', buf
      @active = false
      # keep reading until the whole file has been delivered
      @loop() if @offset < @length

    @reader.onloadend = =>
      if @offset is @length
        @emit 'end'
        @reader = null

    @reader.onerror = (e) =>
      @emit 'error', e

    @reader.onprogress = (e) =>
      @emit 'progress', (@offset + e.loaded) / @length * 100

    @loop()

  # read the next chunk asynchronously via the FileReader
  loop: ->
    @active = true
    endPos = Math.min(@offset + @chunkSize, @length)

    blob = @file[@slice](@offset, endPos)
    @reader.readAsArrayBuffer(blob)

  pause: ->
    @active = false
    @reader?.abort()

  reset: ->
    @pause()
    @offset = 0
--------------------------------------------------------------------------------
/tests/sources/http.coffee:
--------------------------------------------------------------------------------
#import "../crc32.coffee"

module 'sources/http', ->
  asyncTest = assert.asyncTest

  # check that the data returned by the source is correct, using a CRC32 checksum
  asyncTest 'data', ->
    crc = new CRC32
    source = new AV.HTTPSource "#{HTTP_BASE}/data/m4a/base.m4a"

    source.on 'data', (chunk) ->
      crc.update chunk

    source.on 'end', ->
      assert.equal crc.toHex(), '84d9f967'
      assert.start()

    source.start()

  # progress must increase monotonically and finish at exactly 100
  asyncTest 'progress', ->
    source = new AV.HTTPSource "#{HTTP_BASE}/data/m4a/base.m4a"

    lastProgress = 0
    source.on 'progress', (progress) ->
      assert.ok progress > lastProgress, 'progress > lastProgress'
      assert.ok progress <= 100, 'progress <= 100'
      lastProgress = progress

    source.on 'end', ->
      assert.equal lastProgress, 100
      assert.start()

    source.start()

  # an unresolvable host should surface an 'error' event
  asyncTest 'invalid url error', ->
    source = new AV.HTTPSource 'http://dlfigu'

    source.on 'error', ->
      assert.ok true
      assert.start()

    source.start()

  # a non-200 response should surface an 'error' event
  asyncTest '404', ->
    source = new AV.HTTPSource "#{HTTP_BASE}/nothing.m4a"

    source.on 'error', (error) ->
      assert.ok true
      assert.start()

    source.start()
--------------------------------------------------------------------------------
/src/devices/node-speaker.coffee:
--------------------------------------------------------------------------------
class NodeSpeakerDevice extends AV.EventEmitter
  AV.AudioDevice.register(NodeSpeakerDevice)

  # `speaker` is a native dependency that may fail to load; if it
  # does, @supported stays falsy and this backend is skipped
  try
    Speaker = require('speaker')
    Readable = require('stream').Readable

  @supported: Speaker?

  constructor: (@sampleRate, @channels) ->
    # output 32-bit signed float PCM to the speaker
    @speaker = new Speaker
      channels: @channels
      sampleRate: @sampleRate
      bitDepth: 32
      float: true
      signed: true

    @buffer = null
    @arr = null
    @currentFrame = 0
    @ended = false

    # setup a node readable stream and pipe to speaker output
    @input = new Readable
    @input._read = @refill
    @input.pipe @speaker

  # called by the stream machinery when the speaker wants n more bytes
  refill: (n) =>
    {arr,buffer} = this

    # reuse the same buffers if possible
    len = n / 4    # 4 bytes per float sample
    if arr?.length isnt len
      @arr = arr = new Float32Array(len)

    # ask listeners (the player) to fill `arr` with samples
    @emit 'refill', arr
    return if @ended

    if buffer?.length isnt n
      @buffer = buffer = new Buffer(n)

    # copy the data from the Float32Array into the node buffer
    offset = 0
    for frame in arr
      buffer.writeFloatLE(frame, offset)
      offset += 4

    @input.push buffer
    @currentFrame += len / @channels

  destroy: ->
    @ended = true
    # pushing null signals end-of-stream to the piped speaker
    @input.push null

  getDeviceTime: ->
    return @currentFrame # TODO: make this more accurate
--------------------------------------------------------------------------------
/tests/demuxers/au.coffee:
--------------------------------------------------------------------------------
1 | #import "shared.coffee"
2 |
3 | module 'demuxers/au', ->
4 | demuxerTest 'bei16',
5 | file: 'au/bei16.au'
6 | format:
7 | formatID: 'lpcm'
8 | sampleRate: 44100
9 | bitsPerChannel: 16
10 | channelsPerFrame: 2
11 | bytesPerPacket: 4
12 | framesPerPacket: 1
13 | littleEndian: false
14 | floatingPoint: false
15 | duration: 7430
16 | data: 'd4c3bdc0'
17 |
18 | demuxerTest 'bef32',
19 | file: 'au/bef32.au'
20 | format:
21 | formatID: 'lpcm'
22 | sampleRate: 44100
23 | bitsPerChannel: 32
24 | channelsPerFrame: 2
25 | bytesPerPacket: 8
26 | framesPerPacket: 1
27 | littleEndian: false
28 | floatingPoint: true
29 | duration: 7430
30 | data: '52dbaba2'
31 |
32 | demuxerTest 'alaw',
33 | file: 'au/alaw.au'
34 | format:
35 | formatID: 'alaw'
36 | sampleRate: 44100
37 | bitsPerChannel: 8
38 | channelsPerFrame: 2
39 | bytesPerPacket: 2
40 | framesPerPacket: 1
41 | littleEndian: false
42 | floatingPoint: false
43 | duration: 7430
44 | data: 'e49cda0c'
45 |
46 | demuxerTest 'ulaw',
47 | file: 'au/ulaw.au'
48 | format:
49 | formatID: 'ulaw'
50 | sampleRate: 44100
51 | bitsPerChannel: 8
52 | channelsPerFrame: 2
53 | bytesPerPacket: 2
54 | framesPerPacket: 1
55 | littleEndian: false
56 | floatingPoint: false
57 | duration: 7430
58 | data: '18b71b9b'
--------------------------------------------------------------------------------
/tests/demuxers/wave.coffee:
--------------------------------------------------------------------------------
1 | #import "shared.coffee"
2 |
3 | module 'demuxers/wave', ->
4 | demuxerTest 'lei16',
5 | file: 'wave/lei16.wav'
6 | format:
7 | formatID: 'lpcm'
8 | sampleRate: 44100
9 | bitsPerChannel: 16
10 | channelsPerFrame: 2
11 | bytesPerPacket: 4
12 | framesPerPacket: 1
13 | littleEndian: true
14 | floatingPoint: false
15 | duration: 8916
16 | data: '6b6b722b'
17 |
18 | demuxerTest 'lef32',
19 | file: 'wave/lef32.wav'
20 | format:
21 | formatID: 'lpcm'
22 | sampleRate: 44100
23 | bitsPerChannel: 32
24 | channelsPerFrame: 2
25 | bytesPerPacket: 8
26 | framesPerPacket: 1
27 | littleEndian: true
28 | floatingPoint: true
29 | duration: 8916
30 | data: '9b2a9317'
31 |
32 | demuxerTest 'ulaw',
33 | file: 'wave/ulaw.wav'
34 | format:
35 | formatID: 'ulaw'
36 | sampleRate: 44100
37 | bitsPerChannel: 8
38 | channelsPerFrame: 2
39 | bytesPerPacket: 2
40 | framesPerPacket: 1
41 | littleEndian: false
42 | floatingPoint: false
43 | duration: 8916
44 | data: '1af5b4fe'
45 |
46 | demuxerTest 'read the full fmt chunk',
47 | file: 'wave/issue35.wav'
48 | format:
49 | formatID: 'lpcm'
50 | sampleRate: 44100
51 | bitsPerChannel: 16
52 | channelsPerFrame: 2
53 | bytesPerPacket: 4
54 | framesPerPacket: 1
55 | littleEndian: true
56 | floatingPoint: false
57 | duration: 8916
58 | data: '82d0f0ea'
59 |
--------------------------------------------------------------------------------
/src/decoders/xlaw.coffee:
--------------------------------------------------------------------------------
class XLAWDecoder extends AV.Decoder
  AV.Decoder.register('ulaw', XLAWDecoder)
  AV.Decoder.register('alaw', XLAWDecoder)

  # G.711 companding constants (sign bit, 4-bit mantissa, 3-bit segment)
  SIGN_BIT = 0x80
  QUANT_MASK = 0xf
  SEG_SHIFT = 4
  SEG_MASK = 0x70
  BIAS = 0x84

  # Builds a 256-entry lookup table mapping each compressed 8-bit
  # u-law/a-law byte to its expanded 16-bit linear PCM value.
  init: ->
    @format.bitsPerChannel = 16
    @table = table = new Int16Array(256)

    if @format.formatID is 'ulaw'
      for i in [0...256]
        # Complement to obtain normal u-law value.
        val = ~i

        # Extract and bias the quantization bits. Then
        # shift up by the segment number and subtract out the bias.
        t = ((val & QUANT_MASK) << 3) + BIAS
        t <<= (val & SEG_MASK) >>> SEG_SHIFT

        table[i] = if val & SIGN_BIT then BIAS - t else t - BIAS

    else
      for i in [0...256]
        # a-law bytes have their even bits inverted on the wire
        val = i ^ 0x55
        t = val & QUANT_MASK
        seg = (val & SEG_MASK) >>> SEG_SHIFT

        if seg
          t = (t + t + 1 + 32) << (seg + 2)
        else
          t = (t + t + 1) << 3

        table[i] = if val & SIGN_BIT then t else -t

    return

  # Decode up to 4096 samples per chunk via table lookup.
  readChunk: =>
    {stream, table} = this

    samples = Math.min(4096, @stream.remainingBytes())
    return if samples is 0

    output = new Int16Array(samples)
    for i in [0...samples] by 1
      output[i] = table[stream.readUInt8()]

    return output
--------------------------------------------------------------------------------
/src/device.coffee:
--------------------------------------------------------------------------------
1 | #
2 | # The AudioDevice class is responsible for interfacing with various audio
3 | # APIs in browsers, and for keeping track of the current playback time
4 | # based on the device hardware time and the play/pause/seek state
5 | #
6 |
class AV.AudioDevice extends AV.EventEmitter
    # sampleRate and channels describe the PCM stream that will be
    # requested from listeners via 'refill' events.
    constructor: (@sampleRate, @channels) ->
        @playing = false
        @currentTime = 0   # playback position in milliseconds
        @_lastTime = 0     # device clock reading at the previous tick

    # Begin playback: lazily create the underlying hardware device,
    # start the clock-tracking timer, and forward its 'refill' events.
    start: ->
        return if @playing
        @playing = true

        @device ?= AV.AudioDevice.create(@sampleRate, @channels)
        unless @device
            throw new Error "No supported audio device found."

        @_lastTime = @device.getDeviceTime()

        @_timer = setInterval @updateTime, 200
        @device.on 'refill', @refill = (buffer) =>
            @emit 'refill', buffer

    # Pause playback: stop refilling and stop the clock. The device
    # itself is kept so playback can resume with start().
    stop: ->
        return unless @playing
        @playing = false

        @device.off 'refill', @refill
        clearInterval @_timer

    destroy: ->
        @stop()
        @device.destroy()

    # Jump the clock to the given time (ms). Only resets the time
    # bookkeeping; the actual stream seek happens elsewhere.
    seek: (@currentTime) ->
        @_lastTime = @device.getDeviceTime() if @playing
        @emit 'timeUpdate', @currentTime

    # Advance currentTime by the device time elapsed since the last
    # tick, converted from samples to whole milliseconds.
    updateTime: =>
        time = @device.getDeviceTime()
        @currentTime += (time - @_lastTime) / @device.sampleRate * 1000 | 0
        @_lastTime = time
        @emit 'timeUpdate', @currentTime

    # Registry of concrete device implementations; create() picks the
    # first one whose `supported` flag is true.
    devices = []
    @register: (device) ->
        devices.push(device)

    @create: (sampleRate, channels) ->
        for device in devices when device.supported
            return new device(sampleRate, channels)

        return null
--------------------------------------------------------------------------------
/src/demuxers/au.coffee:
--------------------------------------------------------------------------------
class AUDemuxer extends AV.Demuxer
    AV.Demuxer.register(AUDemuxer)

    # Sun/NeXT .au files start with the magic string '.snd'.
    @probe: (buffer) ->
        return buffer.peekString(0, 4) is '.snd'

    # Bits per channel indexed by (encoding - 1): encodings 1-7 are
    # u-law, 8/16/24/32-bit linear PCM and 32/64-bit float;
    # encoding 27 (index 26) is 8-bit A-law.
    bps = [8, 8, 16, 24, 32, 32, 64]
    bps[26] = 8

    # Encodings that need a companding decoder; all others are LPCM.
    formats =
        1: 'ulaw'
        27: 'alaw'

    readChunk: ->
        # Fixed 24-byte header: magic, data offset, data size, encoding,
        # sample rate, channel count (all big endian).
        if not @readHeader and @stream.available(24)
            if @stream.readString(4) isnt '.snd'
                return @emit 'error', 'Invalid AU file.'

            size = @stream.readUInt32()     # data offset field
            dataSize = @stream.readUInt32()
            encoding = @stream.readUInt32()

            # NOTE(review): the data offset (`size`) is read but never
            # used — files whose data starts past byte 24 (annotation
            # present) would have the annotation bytes emitted as audio.
            # Confirm whether such files need to be supported.
            @format =
                formatID: formats[encoding] or 'lpcm'
                littleEndian: false
                floatingPoint: encoding in [6, 7]
                bitsPerChannel: bps[encoding - 1]
                sampleRate: @stream.readUInt32()
                channelsPerFrame: @stream.readUInt32()
                framesPerPacket: 1

            if not @format.bitsPerChannel?
                return @emit 'error', 'Unsupported encoding in AU file.'

            @format.bytesPerPacket = (@format.bitsPerChannel / 8) * @format.channelsPerFrame

            # 0xffffffff means "unknown data size" — no duration then.
            if dataSize isnt 0xffffffff
                bytes = @format.bitsPerChannel / 8
                @emit 'duration', dataSize / bytes / @format.channelsPerFrame / @format.sampleRate * 1000 | 0

            @emit 'format', @format
            @readHeader = true

        # Everything after the header is raw audio data.
        if @readHeader
            while @stream.available(1)
                @emit 'data', @stream.readSingleBuffer(@stream.remainingBytes())

        return
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Aurora.js
2 | =========
3 |
4 | Aurora.js is a framework that makes writing audio decoders in JavaScript easier. It handles common
5 | tasks for you such as dealing with binary data, and the decoding pipeline from source to demuxer to
6 | decoder, and finally to the audio hardware itself by abstracting browser audio APIs. Aurora contains
two high level APIs for inspecting and playing back decoded audio, and it is easily extensible to support
more sources, demuxers, decoders, and audio devices.
9 |
10 | Check out the [documentation](https://github.com/audiocogs/aurora.js/wiki) to learn more about using and
11 | extending Aurora.
12 |
13 | ## Demo
14 |
15 | We have written several decoders using Aurora.js, whose demos you can find [here](http://audiocogs.org/codecs/)
and whose source code can be found on our [GitHub](https://github.com/audiocogs/) page.
17 |
18 | ## Authors
19 |
20 | Aurora.js was written by [@jensnockert](https://github.com/jensnockert) and [@devongovett](https://github.com/devongovett)
21 | of [Audiocogs](https://github.com/audiocogs/).
22 |
23 | ## Building
24 |
25 | Currently, the [importer](https://github.com/devongovett/importer) module is used to build Aurora.js. You can run
26 | the development server by first installing `importer` with npm, and then running it like this:
27 |
28 | npm install importer -g
29 | importer browser.coffee -p 8080
30 |
31 | You can also build a static version like this:
32 |
33 | importer browser.coffee aurora.js
34 |
35 | By itself, Aurora will play LPCM, uLaw and aLaw files in a number of containers. Be sure to add additional codec support
36 | by including some of our other decoders such as [FLAC.js](https://github.com/audiocogs/flac.js),
37 | [ALAC.js](https://github.com/audiocogs/alac.js), and [MP3.js](https://github.com/devongovett/mp3.js).
38 |
39 | ## License
40 |
41 | Aurora.js is released under the MIT license.
--------------------------------------------------------------------------------
/tests/demuxers/aiff.coffee:
--------------------------------------------------------------------------------
1 | #import "shared.coffee"
2 |
# AIFF/AIFC demuxer fixtures. Each case asserts the parsed `format`
# object, the reported duration (milliseconds), and a CRC32 checksum
# (hex) of the raw sample data the demuxer emits.
module 'demuxers/aiff', ->
    demuxerTest 'bei16',
        file: 'aiff/bei16.aiff'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            bytesPerPacket: 4
            framesPerPacket: 1
            sampleCount: 347379
            littleEndian: false
            floatingPoint: false
        duration: 7877
        data: '35da18ed'

    demuxerTest 'lei16',
        file: 'aiff/lei16.aifc'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            bytesPerPacket: 4
            framesPerPacket: 1
            sampleCount: 347379
            littleEndian: true
            floatingPoint: false
        duration: 7877
        data: 'dba3f225'

    demuxerTest 'bef32',
        file: 'aiff/bef32.aifc'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            bytesPerPacket: 8
            framesPerPacket: 1
            sampleCount: 347379
            littleEndian: false
            floatingPoint: true
        duration: 7877
        data: 'db37e290'

    demuxerTest 'alaw',
        file: 'aiff/alaw.aifc'
        format:
            formatID: 'alaw'
            sampleRate: 44100
            bitsPerChannel: 8
            channelsPerFrame: 2
            bytesPerPacket: 2
            framesPerPacket: 1
            sampleCount: 347379
            littleEndian: false
            floatingPoint: false
        duration: 7877
        data: 'f4b20b9b'
--------------------------------------------------------------------------------
/tests/demuxers/shared.coffee:
--------------------------------------------------------------------------------
1 | #import "../crc32.coffee"
2 |
# Shared harness for demuxer tests. `config` keys:
#   file     - fixture path, relative to the test data directory
#   format   - expected 'format' event payload (deepEqual)
#   duration - expected duration in milliseconds
#   metadata, chapters - expected event payloads (deepEqual)
#   data     - expected CRC32 (hex) over all emitted data buffers
# One assertion is expected per key present in config.
demuxerTest = (name, config) ->
    assert.asyncTest name, ->
        # fixtures are read from disk in Node, over HTTP in the browser
        if AV.isNode
            source = new AV.FileSource "#{__dirname}/data/#{config.file}"
        else
            source = new AV.HTTPSource "#{HTTP_BASE}/data/#{config.file}"

        source.once 'data', (chunk) ->
            Demuxer = AV.Demuxer.find(chunk)
            demuxer = new Demuxer(source, chunk)

            # each `x?` existence check coerces to 0/1 when summed
            expect = config.format? + config.duration? + config.metadata? + config.chapters? + config.cookie? + config.data?
            assert.expect(expect)

            if config.format
                demuxer.once 'format', (format) ->
                    assert.deepEqual format, config.format

            if config.duration
                demuxer.once 'duration', (duration) ->
                    assert.equal duration, config.duration

            if config.metadata
                demuxer.once 'metadata', (metadata) ->
                    # generate coverArt CRC
                    if metadata.coverArt
                        crc = new CRC32()
                        crc.update metadata.coverArt
                        metadata.coverArt = crc.toHex()

                    assert.deepEqual metadata, config.metadata

            if config.chapters
                demuxer.once 'chapters', (chapters) ->
                    assert.deepEqual chapters, config.chapters

            if config.data
                crc = new CRC32
                demuxer.on 'data', (buffer) ->
                    crc.update(buffer)

            demuxer.on 'end', ->
                if config.data
                    assert.equal crc.toHex(), config.data

                assert.start()

        source.start()
--------------------------------------------------------------------------------
/tests/demuxers/caf.coffee:
--------------------------------------------------------------------------------
1 | #import "shared.coffee"
2 |
# CAF demuxer fixtures. Each case asserts the parsed `format` object,
# the reported duration (milliseconds), and a CRC32 checksum (hex) of
# the raw packet data the demuxer emits.
module 'demuxers/caf', ->
    demuxerTest 'base',
        file: 'caf/aac.caf'
        format:
            formatID: 'aac '
            sampleRate: 44100
            bitsPerChannel: 0
            channelsPerFrame: 2
            bytesPerPacket: 0
            framesPerPacket: 1024
        duration: 38659
        data: 'd21b23ee'

    demuxerTest 'bei16',
        file: 'caf/bei16.caf'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            bytesPerPacket: 4
            framesPerPacket: 1
            floatingPoint: false
            littleEndian: false
        duration: 38659
        data: '4f427df9'

    demuxerTest 'lei32',
        file: 'caf/lei32.caf'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            bytesPerPacket: 8
            framesPerPacket: 1
            floatingPoint: false
            littleEndian: true
        duration: 38659
        data: '771d822a'

    demuxerTest 'bef32',
        file: 'caf/bef32.caf'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            bytesPerPacket: 8
            framesPerPacket: 1
            floatingPoint: true
            littleEndian: false
        duration: 38659
        data: '7bf9d9d2'

    demuxerTest 'lef64',
        file: 'caf/lef64.caf'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 64
            channelsPerFrame: 2
            bytesPerPacket: 16
            framesPerPacket: 1
            floatingPoint: true
            littleEndian: true
        duration: 38659
        data: '9a3372e'
--------------------------------------------------------------------------------
/src/decoders/lpcm.coffee:
--------------------------------------------------------------------------------
# Pass-through decoder for uncompressed linear PCM. Each readChunk()
# call copies up to 4096 bytes from the stream into a typed array
# whose element type matches the negotiated sample format.
class LPCMDecoder extends AV.Decoder
    AV.Decoder.register('lpcm', LPCMDecoder)

    readChunk: =>
        {stream, format} = this
        le = format.littleEndian
        bytesPerSample = format.bitsPerChannel / 8

        # cap each chunk at 4KB of input
        chunkSize = Math.min(4096, stream.remainingBytes())
        count = chunkSize / bytesPerSample | 0

        # less than one whole sample buffered yet
        return null if chunkSize < bytesPerSample

        if format.floatingPoint
            switch format.bitsPerChannel
                when 32
                    output = new Float32Array(count)
                    for n in [0...count] by 1
                        output[n] = stream.readFloat32(le)

                when 64
                    output = new Float64Array(count)
                    for n in [0...count] by 1
                        output[n] = stream.readFloat64(le)

                else
                    throw new Error 'Unsupported bit depth.'

        else
            switch format.bitsPerChannel
                when 8
                    output = new Int8Array(count)
                    for n in [0...count] by 1
                        output[n] = stream.readInt8()

                when 16
                    output = new Int16Array(count)
                    for n in [0...count] by 1
                        output[n] = stream.readInt16(le)

                when 24
                    # 24-bit samples are widened into 32-bit slots
                    output = new Int32Array(count)
                    for n in [0...count] by 1
                        output[n] = stream.readInt24(le)

                when 32
                    output = new Int32Array(count)
                    for n in [0...count] by 1
                        output[n] = stream.readInt32(le)

                else
                    throw new Error 'Unsupported bit depth.'

        return output
--------------------------------------------------------------------------------
/src/decoder.coffee:
--------------------------------------------------------------------------------
class AV.Decoder extends AV.EventEmitter
    # Base class for audio decoders: consumes demuxed packets ('data'
    # events from the demuxer) and re-emits decoded output as its own
    # 'data' events, one packet per successful readChunk().
    constructor: (@demuxer, @format) ->
        list = new AV.BufferList
        @stream = new AV.Stream(list)
        @bitstream = new AV.Bitstream(@stream)

        @receivedFinalBuffer = false
        @waiting = false   # true when decode() ran out of data mid-packet

        @demuxer.on 'cookie', (cookie) =>
            try
                @setCookie cookie
            catch error
                @emit 'error', error

        @demuxer.on 'data', (chunk) =>
            list.append chunk
            @decode() if @waiting   # retry the packet that underflowed

        @demuxer.on 'end', =>
            @receivedFinalBuffer = true
            @decode() if @waiting

        @init()

    # Subclass hook, called once at construction time.
    init: ->
        return

    # Subclass hook: receives the codec magic cookie, if any.
    setCookie: (cookie) ->
        return

    # Subclass hook: decode and return one packet, or a falsy value if
    # no complete packet is available. May throw AV.UnderflowError.
    readChunk: ->
        return

    # Attempt to decode one packet. Returns true if a packet was
    # emitted, false if more data is needed or the stream has ended.
    decode: ->
        @waiting = false
        offset = @bitstream.offset()

        try
            packet = @readChunk()
        catch error
            # underflow just means "not enough data yet"; anything
            # else is a genuine decode error
            if error not instanceof AV.UnderflowError
                @emit 'error', error
                return false

        # if a packet was successfully read, emit it
        if packet
            @emit 'data', packet
            return true

        # if we haven't reached the end, jump back and try again when we have more data
        else if not @receivedFinalBuffer
            @bitstream.seek offset
            @waiting = true

        # otherwise we've reached the end
        else
            @emit 'end'

        return false

    seek: (timestamp) ->
        # use the demuxer to get a seek point
        seekPoint = @demuxer.seek(timestamp)
        @stream.seek(seekPoint.offset)
        return seekPoint.timestamp

    # Registry mapping formatIDs (e.g. 'lpcm', 'ulaw') to decoder classes.
    codecs = {}
    @register: (id, decoder) ->
        codecs[id] = decoder

    @find: (id) ->
        return codecs[id] or null
--------------------------------------------------------------------------------
/src/devices/mozilla.coffee:
--------------------------------------------------------------------------------
# Playback device backed by Firefox's (legacy) Audio Data API.
# Samples are pushed with mozWriteAudio; samples the browser does not
# accept are kept in @tail and retried on the next timer tick.
class MozillaAudioDevice extends AV.EventEmitter
    AV.AudioDevice.register(MozillaAudioDevice)

    # determine whether this device is supported by the browser
    @supported: Audio? and 'mozWriteAudio' of new Audio

    constructor: (@sampleRate, @channels) ->
        @audio = new Audio
        @audio.mozSetup(@channels, @sampleRate)

        @writePosition = 0                 # total samples written so far
        @prebufferSize = @sampleRate / 2   # keep half a second buffered ahead
        @tail = null                       # samples not yet accepted by the browser

        @timer = createTimer @refill, 100

    # Push samples to the hardware, keeping @prebufferSize samples
    # queued ahead of the current playback position.
    refill: =>
        if @tail
            written = @audio.mozWriteAudio(@tail)
            @writePosition += written

            # Fix: compare the amount just written against the tail's
            # own length. The previous check (@writePosition < @tail.length)
            # used the cumulative write position, so once enough audio had
            # played, the unwritten remainder of the tail was silently
            # dropped. Also return early while a tail remains so newly
            # refilled samples cannot be written ahead of it.
            if written < @tail.length
                @tail = @tail.subarray(written)
                return
            @tail = null

        currentPosition = @audio.mozCurrentSampleOffset()
        available = currentPosition + @prebufferSize - @writePosition
        if available > 0
            buffer = new Float32Array(available)
            @emit 'refill', buffer

            written = @audio.mozWriteAudio(buffer)
            if written < buffer.length
                @tail = buffer.subarray(written)

            @writePosition += written

        return

    destroy: ->
        destroyTimer @timer

    # Device clock, in samples per channel.
    getDeviceTime: ->
        return @audio.mozCurrentSampleOffset() / @channels

    # Use an inline worker to get setInterval
    # without being clamped in background tabs
    createTimer = (fn, interval) ->
        url = AV.Buffer.makeBlobURL("setInterval(function() { postMessage('ping'); }, #{interval});")
        return setInterval fn, interval unless url?

        worker = new Worker(url)
        worker.onmessage = fn
        worker.url = url

        return worker

    destroyTimer = (timer) ->
        if timer.terminate
            timer.terminate()
            URL.revokeObjectURL(timer.url)
        else
            clearInterval timer
--------------------------------------------------------------------------------
/src/devices/webaudio.coffee:
--------------------------------------------------------------------------------
1 | #import "resampler.js"
2 |
class WebAudioDevice extends AV.EventEmitter
    AV.AudioDevice.register(WebAudioDevice)

    # determine whether this device is supported by the browser;
    # as a side effect, `createProcessor` captures the name of the
    # available (possibly prefixed) script-processor factory method
    AudioContext = global.AudioContext or global.webkitAudioContext
    @supported = AudioContext and
        (typeof AudioContext::[createProcessor = 'createScriptProcessor'] is 'function' or
        typeof AudioContext::[createProcessor = 'createJavaScriptNode'] is 'function')

    # Chrome limits the number of AudioContexts that one can create,
    # so use a lazily created shared context for all playback
    sharedContext = null

    constructor: (@sampleRate, @channels) ->
        @context = sharedContext ?= new AudioContext
        @deviceSampleRate = @context.sampleRate

        # calculate the buffer size to read: 4096 frames scaled by the
        # resampling ratio, rounded up to a multiple of the channel count
        @bufferSize = Math.ceil(4096 / (@deviceSampleRate / @sampleRate) * @channels)
        @bufferSize += @bufferSize % @channels

        # if the sample rate doesn't match the hardware sample rate, create a resampler
        if @deviceSampleRate isnt @sampleRate
            @resampler = new Resampler(@sampleRate, @deviceSampleRate, @channels, 4096 * @channels)

        @node = @context[createProcessor](4096, @channels, @channels)
        @node.onaudioprocess = @refill
        @node.connect(@context.destination)

    # onaudioprocess callback: request interleaved samples via a
    # 'refill' event, resample if needed, then de-interleave into the
    # per-channel output buffers.
    refill: (event) =>
        outputBuffer = event.outputBuffer
        channelCount = outputBuffer.numberOfChannels
        channels = new Array(channelCount)

        # get output channels
        for i in [0...channelCount] by 1
            channels[i] = outputBuffer.getChannelData(i)

        # get audio data
        data = new Float32Array(@bufferSize)
        @emit 'refill', data

        # resample if necessary
        if @resampler
            data = @resampler.resampler(data)

        # write data to output
        for i in [0...outputBuffer.length] by 1
            for n in [0...channelCount] by 1
                channels[n][i] = data[i * channelCount + n]

        return

    destroy: ->
        @node.disconnect(0)

    # Device clock, in samples at the source sample rate.
    getDeviceTime: ->
        return @context.currentTime * @sampleRate
--------------------------------------------------------------------------------
/src/sources/browser/http.coffee:
--------------------------------------------------------------------------------
# Browser HTTP source. Downloads the URL in ranged chunks, emitting
# each chunk as an AV.Buffer 'data' event, plus 'progress' (percent),
# 'end' and 'error' events.
class AV.HTTPSource extends AV.EventEmitter
    constructor: (@url) ->
        @chunkSize = 1 << 20   # request roughly 1MB at a time
        @inflight = false      # is an XHR currently outstanding?
        @reset()

    # Start (or resume) downloading. The first call issues a HEAD
    # request to learn the resource length before the chunk loop runs.
    start: ->
        if @length
            return @loop() unless @inflight

        @inflight = true
        @xhr = new XMLHttpRequest()

        @xhr.onload = (event) =>
            # radix 10: Content-Length is always a decimal integer
            @length = parseInt @xhr.getResponseHeader("Content-Length"), 10
            @inflight = false
            @loop()

        @xhr.onerror = (err) =>
            @pause()
            @emit 'error', err

        @xhr.onabort = (event) =>
            @inflight = false

        @xhr.open("HEAD", @url, true)
        @xhr.send(null)

    # Request the next chunk, starting at @offset.
    loop: ->
        if @inflight or not @length
            return @emit 'error', 'Something is wrong in HTTPSource.loop'

        @inflight = true
        @xhr = new XMLHttpRequest()

        @xhr.onload = (event) =>
            if @xhr.response
                buf = new Uint8Array(@xhr.response)
            else
                # fallback for browsers without ArrayBuffer responses:
                # recover the raw bytes from the binary-safe text body
                txt = @xhr.responseText
                buf = new Uint8Array(txt.length)
                for i in [0...txt.length]
                    buf[i] = txt.charCodeAt(i) & 0xff

            buffer = new AV.Buffer(buf)
            @offset += buffer.length

            @emit 'data', buffer
            @emit 'end' if @offset >= @length

            @inflight = false
            @loop() unless @offset >= @length

        @xhr.onprogress = (event) =>
            @emit 'progress', (@offset + event.loaded) / @length * 100

        @xhr.onerror = (err) =>
            @emit 'error', err
            @pause()

        @xhr.onabort = (event) =>
            @inflight = false

        @xhr.open("GET", @url, true)
        @xhr.responseType = "arraybuffer"

        # Range ends are inclusive, so clamp to the last valid byte
        # index (@length - 1); the previous code requested one byte
        # past the end of the resource on the final chunk.
        endPos = Math.min(@offset + @chunkSize, @length - 1)
        @xhr.setRequestHeader("Range", "bytes=#{@offset}-#{endPos}")
        @xhr.overrideMimeType('text/plain; charset=x-user-defined')
        @xhr.send(null)

    # Abort any outstanding request; start() resumes from @offset.
    pause: ->
        @inflight = false
        @xhr?.abort()

    reset: ->
        @pause()
        @offset = 0
--------------------------------------------------------------------------------
/src/core/buffer.coffee:
--------------------------------------------------------------------------------
# Lightweight wrapper around a Uint8Array, with conversions from the
# common binary container types and helpers for Blob creation.
class AV.Buffer
    # Accepts a Uint8Array, ArrayBuffer, plain Array, length (number),
    # Node Buffer, any other typed array, or another AV.Buffer.
    constructor: (input) ->
        if input instanceof Uint8Array # Uint8Array
            @data = input

        else if input instanceof ArrayBuffer or # ArrayBuffer
                Array.isArray(input) or # normal JS Array
                typeof input is 'number' or # number (i.e. length)
                AV.isNode and global.Buffer?.isBuffer(input) # Node Buffer
            @data = new Uint8Array(input)

        else if input.buffer instanceof ArrayBuffer # typed arrays other than Uint8Array
            @data = new Uint8Array(input.buffer, input.byteOffset, input.length * input.BYTES_PER_ELEMENT)

        else if input instanceof AV.Buffer # AV.Buffer, make a shallow copy
            @data = input.data

        else
            throw new Error "Constructing buffer with unknown type."

        @length = @data.length

        # used when the buffer is part of a bufferlist
        @next = null
        @prev = null

    @allocate: (size) ->
        return new AV.Buffer(size)

    # Deep copy: duplicates the underlying bytes.
    copy: ->
        return new AV.Buffer(new Uint8Array(@data))

    # View over a byte range; shares memory with this buffer.
    slice: (position, length = @length) ->
        if position is 0 and length >= @length
            return new AV.Buffer(@data)
        else
            return new AV.Buffer(@data.subarray(position, position + length))

    # prefix-free
    BlobBuilder = global.BlobBuilder or global.MozBlobBuilder or global.WebKitBlobBuilder
    URL = global.URL or global.webkitURL or global.mozURL

    @makeBlob: (data, type = 'application/octet-stream') ->
        # try the Blob constructor
        try
            return new Blob [data], type: type

        # use the old BlobBuilder
        if BlobBuilder?
            bb = new BlobBuilder
            bb.append data
            return bb.getBlob(type)

        # oops, no blobs supported :(
        return null

    @makeBlobURL: (data, type) ->
        return URL?.createObjectURL @makeBlob(data, type)

    @revokeBlobURL: (url) ->
        URL?.revokeObjectURL url

    toBlob: ->
        # fixed: must go through AV.Buffer — a bare `Buffer` is
        # undefined in browsers (and Node's Buffer has no makeBlob)
        return AV.Buffer.makeBlob @data.buffer

    toBlobURL: ->
        return AV.Buffer.makeBlobURL @data.buffer
--------------------------------------------------------------------------------
/src/demuxer.coffee:
--------------------------------------------------------------------------------
class AV.Demuxer extends AV.EventEmitter
    # Subclasses override probe() to sniff whether a stream looks
    # like their container format.
    @probe: (buffer) ->
        return false

    constructor: (source, chunk) ->
        list = new AV.BufferList
        list.append chunk
        @stream = new AV.Stream(list)

        received = false
        source.on 'data', (chunk) =>
            received = true
            list.append chunk
            @readChunk chunk

        source.on 'error', (err) =>
            @emit 'error', err

        source.on 'end', =>
            # if there was only one chunk received, read it
            @readChunk chunk unless received
            @emit 'end'

        @seekPoints = []
        @init()

    # Subclass hook, called once at construction time.
    init: ->
        return

    # Subclass hook: parse as much of @stream as is available.
    readChunk: (chunk) ->
        return

    # Insert a seek point, keeping @seekPoints sorted by timestamp.
    addSeekPoint: (offset, timestamp) ->
        index = @searchTimestamp timestamp
        @seekPoints.splice index, 0,
            offset: offset
            timestamp: timestamp

    # Binary search: index of the first seek point whose timestamp is
    # >= the requested one. (`backward` is currently unused.)
    searchTimestamp: (timestamp, backward) ->
        low = 0
        high = @seekPoints.length

        # optimize appending entries
        if high > 0 and @seekPoints[high - 1].timestamp < timestamp
            return high

        while low < high
            mid = (low + high) >> 1
            time = @seekPoints[mid].timestamp

            if time < timestamp
                low = mid + 1

            else if time >= timestamp
                high = mid

        if high > @seekPoints.length
            high = @seekPoints.length

        return high

    # Map a timestamp to a byte offset: computed directly for
    # constant-bitrate formats, otherwise looked up in @seekPoints.
    seek: (timestamp) ->
        if @format and @format.framesPerPacket > 0 and @format.bytesPerPacket > 0
            seekPoint =
                timestamp: timestamp
                offset: @format.bytesPerPacket * timestamp / @format.framesPerPacket

            return seekPoint
        else
            index = @searchTimestamp timestamp
            return @seekPoints[index]

    # Registry of demuxer classes; find() probes each in turn.
    formats = []
    @register: (demuxer) ->
        formats.push demuxer

    @find: (buffer) ->
        stream = AV.Stream.fromBuffer(buffer)
        for format in formats when format.probe(stream)
            return format

        return null
--------------------------------------------------------------------------------
/src/demuxers/wave.coffee:
--------------------------------------------------------------------------------
class WAVEDemuxer extends AV.Demuxer
    AV.Demuxer.register(WAVEDemuxer)

    # RIFF container whose form type is WAVE.
    @probe: (buffer) ->
        return buffer.peekString(0, 4) is 'RIFF' &&
               buffer.peekString(8, 4) is 'WAVE'

    # WAVE format-tag values this demuxer supports.
    formats =
        0x0001: 'lpcm'   # integer PCM
        0x0003: 'lpcm'   # IEEE float PCM
        0x0006: 'alaw'
        0x0007: 'ulaw'

    readChunk: ->
        # RIFF header: 'RIFF', file size, 'WAVE'
        if not @readStart and @stream.available(12)
            if @stream.readString(4) isnt 'RIFF'
                return @emit 'error', 'Invalid WAV file.'

            @fileSize = @stream.readUInt32(true)
            @readStart = true

            if @stream.readString(4) isnt 'WAVE'
                return @emit 'error', 'Invalid WAV file.'

        # walk the chunk list; @type/@len persist across calls so a
        # chunk split over several network buffers resumes correctly
        while @stream.available(1)
            if not @readHeaders and @stream.available(8)
                @type = @stream.readString(4)
                @len = @stream.readUInt32(true) # little endian

            switch @type
                when 'fmt '
                    encoding = @stream.readUInt16(true)
                    if encoding not of formats
                        return @emit 'error', 'Unsupported format in WAV file.'

                    @format =
                        formatID: formats[encoding]
                        floatingPoint: encoding is 0x0003
                        littleEndian: formats[encoding] is 'lpcm'
                        channelsPerFrame: @stream.readUInt16(true)
                        sampleRate: @stream.readUInt32(true)
                        framesPerPacket: 1

                    @stream.advance(4) # bytes/sec.
                    @stream.advance(2) # block align

                    @format.bitsPerChannel = @stream.readUInt16(true)
                    @format.bytesPerPacket = (@format.bitsPerChannel / 8) * @format.channelsPerFrame

                    @emit 'format', @format

                    # Advance to the next chunk
                    @stream.advance(@len - 16)

                when 'data'
                    if not @sentDuration
                        bytes = @format.bitsPerChannel / 8
                        @emit 'duration', @len / bytes / @format.channelsPerFrame / @format.sampleRate * 1000 | 0
                        @sentDuration = true

                    buffer = @stream.readSingleBuffer(@len)
                    @len -= buffer.length
                    @readHeaders = @len > 0   # keep streaming this chunk next call
                    @emit 'data', buffer

                else
                    # unknown chunk: skip it once fully buffered
                    return unless @stream.available(@len)
                    @stream.advance(@len)

            @readHeaders = false unless @type is 'data'

        return
--------------------------------------------------------------------------------
/src/demuxers/aiff.coffee:
--------------------------------------------------------------------------------
class AIFFDemuxer extends AV.Demuxer
    AV.Demuxer.register(AIFFDemuxer)

    # IFF 'FORM' container holding either AIFF or AIFC data.
    @probe: (buffer) ->
        return buffer.peekString(0, 4) is 'FORM' &&
               buffer.peekString(8, 4) in ['AIFF', 'AIFC']

    readChunk: ->
        # FORM header: 'FORM', file size, form type
        if not @readStart and @stream.available(12)
            if @stream.readString(4) isnt 'FORM'
                return @emit 'error', 'Invalid AIFF.'

            @fileSize = @stream.readUInt32()
            @fileType = @stream.readString(4)
            @readStart = true

            if @fileType not in ['AIFF', 'AIFC']
                return @emit 'error', 'Invalid AIFF.'

        # walk the chunk list; @type/@len persist across calls so a
        # chunk split over several buffers resumes correctly
        while @stream.available(1)
            if not @readHeaders and @stream.available(8)
                @type = @stream.readString(4)
                @len = @stream.readUInt32()

            switch @type
                when 'COMM'
                    return unless @stream.available(@len)

                    @format =
                        formatID: 'lpcm'
                        channelsPerFrame: @stream.readUInt16()
                        sampleCount: @stream.readUInt32()
                        bitsPerChannel: @stream.readUInt16()
                        sampleRate: @stream.readFloat80()   # 80-bit extended float
                        framesPerPacket: 1
                        littleEndian: false
                        floatingPoint: false

                    @format.bytesPerPacket = (@format.bitsPerChannel / 8) * @format.channelsPerFrame

                    # AIFC adds a 4-byte compression type after the common fields
                    if @fileType is 'AIFC'
                        format = @stream.readString(4)

                        @format.littleEndian = format is 'sowt' and @format.bitsPerChannel > 8
                        @format.floatingPoint = format in ['fl32', 'fl64']

                        format = 'lpcm' if format in ['twos', 'sowt', 'fl32', 'fl64', 'NONE']
                        @format.formatID = format
                        @len -= 4

                    # skip any remaining COMM bytes (18 were consumed above)
                    @stream.advance(@len - 18)
                    @emit 'format', @format
                    @emit 'duration', @format.sampleCount / @format.sampleRate * 1000 | 0

                when 'SSND'
                    # NOTE(review): `unless a and b` parses as `not (a and b)`,
                    # so this header branch also runs when the header was
                    # already read but fewer than 4 bytes are buffered —
                    # presumably `if not @readSSNDHeader and @stream.available(4)`
                    # was intended; confirm against upstream behavior.
                    unless @readSSNDHeader and @stream.available(4)
                        offset = @stream.readUInt32()
                        @stream.advance(4) # skip block size
                        @stream.advance(offset) # skip to data
                        @readSSNDHeader = true

                    buffer = @stream.readSingleBuffer(@len)
                    @len -= buffer.length
                    @readHeaders = @len > 0   # keep streaming this chunk next call
                    @emit 'data', buffer

                else
                    # unknown chunk: skip it once fully buffered
                    return unless @stream.available(@len)
                    @stream.advance(@len)

            @readHeaders = false unless @type is 'SSND'

        return
--------------------------------------------------------------------------------
/tests/sources/buffer.coffee:
--------------------------------------------------------------------------------
1 | #import "../crc32.coffee"
2 |
# Tests for AV.BufferSource: the same m4a fixture is fed in as an
# AV.Buffer, a Uint8Array, an ArrayBuffer and an AV.BufferList, and
# each run is checked against a known CRC32 of the fixture bytes.
module 'sources/buffer', ->
    asyncTest = assert.asyncTest
    buffer = null   # lazily-loaded fixture bytes, shared by all tests

    # Load the fixture once, then invoke fn. Node reads it from disk;
    # the browser fetches it over XHR as an ArrayBuffer.
    getData = (fn) ->
        return fn() if buffer

        # if we're in Node, we can read any file we like, otherwise simulate by reading
        # a blob from an XHR and loading it using a FileSource
        if AV.isNode
            require('fs').readFile "#{__dirname}/data/m4a/base.m4a", (err, data) ->
                buffer = new Uint8Array(data)
                fn()
        else
            xhr = new XMLHttpRequest
            xhr.open 'GET', "#{HTTP_BASE}/data/m4a/base.m4a"
            xhr.responseType = 'arraybuffer'
            xhr.send()
            xhr.onload = ->
                buffer = new Uint8Array(xhr.response)
                fn()

    asyncTest 'single AV.Buffer', ->
        getData ->
            crc = new CRC32
            source = new AV.BufferSource new AV.Buffer(buffer)

            source.on 'data', (chunk) ->
                crc.update chunk

            source.on 'progress', (progress) ->
                assert.equal progress, 100

            source.on 'end', ->
                assert.equal crc.toHex(), '84d9f967'
                assert.start()

            source.start()

    asyncTest 'single Uint8Array', ->
        getData ->
            crc = new CRC32
            source = new AV.BufferSource buffer

            source.on 'data', (chunk) ->
                crc.update chunk

            source.on 'progress', (progress) ->
                assert.equal progress, 100

            source.on 'end', ->
                assert.equal crc.toHex(), '84d9f967'
                assert.start()

            source.start()

    asyncTest 'single ArrayBuffer', ->
        getData ->
            crc = new CRC32
            source = new AV.BufferSource buffer.buffer

            source.on 'data', (chunk) ->
                crc.update chunk

            source.on 'progress', (progress) ->
                assert.equal progress, 100

            source.on 'end', ->
                assert.equal crc.toHex(), '84d9f967'
                assert.start()

            source.start()

    # A BufferList source should emit its buffers unchanged, one 'data'
    # event per list entry, with progress reported per entry.
    asyncTest 'AV.BufferList', ->
        getData ->
            list = new AV.BufferList
            buffers = [
                new AV.Buffer(buffer)
                new AV.Buffer(buffer)
                new AV.Buffer(buffer)
            ]

            list.append buffers[0]
            list.append buffers[1]
            list.append buffers[2]

            source = new AV.BufferSource list

            count = 0
            source.on 'data', (chunk) ->
                assert.equal chunk, buffers[count++]

            pcount = 0
            source.on 'progress', (progress) ->
                assert.equal progress, ++pcount / 3 * 100 | 0

            source.on 'end', ->
                assert.equal count, 3
                assert.start()

            source.start()
--------------------------------------------------------------------------------
/tests/crc32.coffee:
--------------------------------------------------------------------------------
# Streaming CRC-32 checksum (IEEE 802.3 reflected polynomial), used by the
# test suite to fingerprint decoded output without storing expected buffers.
class CRC32
    # Standard byte-wise lookup table for polynomial 0xEDB88320
    CRC32_TABLE = [
        0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
        0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
        0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
        0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
        0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
        0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
        0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
        0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
        0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
        0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
        0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
        0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
        0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
        0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
        0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
        0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
        0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
        0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
        0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
        0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
        0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
        0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
        0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
        0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
        0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
        0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
        0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
        0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
        0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
        0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
        0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
        0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
    ]

    constructor: ->
        # start with all bits set, per the CRC-32 specification
        @crc = ~0

    # Fold the bytes of an AV.Buffer into the running checksum.
    update: (buffer) ->
        for byte in buffer.data
            @crc = CRC32_TABLE[(@crc ^ byte) & 0xff] ^ (@crc >>> 8)

        return

    # Final checksum: invert and coerce to unsigned before formatting.
    # NOTE(review): no zero-padding, so a checksum whose high nibble is
    # zero renders shorter than 8 hex digits — fine for the fixed test
    # vectors used here.
    toHex: ->
        return (~@crc >>> 0).toString(16)
--------------------------------------------------------------------------------
/tests/core/buffer.coffee:
--------------------------------------------------------------------------------
module 'core/buffer', ->
    # shared fixture: ten sequential bytes wrapped in an AV.Buffer
    bytes = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    buffer = new AV.Buffer(bytes)

    test 'length', ->
        assert.equal 10, buffer.length

    test 'allocate', ->
        buf = AV.Buffer.allocate(10)
        assert.equal 10, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 10, buf.data.length

    test 'copy', ->
        copy = buffer.copy()

        # a copy has equal length but its own backing array
        assert.equal buffer.length, copy.length
        assert.notEqual buffer.data, copy.data
        assert.equal buffer.data.length, copy.data.length

    test 'slice', ->
        # slice(position, length); an over-long slice returns the whole
        # data array, and omitting length slices to the end
        assert.equal 4, buffer.slice(0, 4).length
        assert.equal bytes, buffer.slice(0, 100).data
        assert.deepEqual new AV.Buffer(bytes.subarray(3, 6)), buffer.slice(3, 3)
        assert.equal 5, buffer.slice(5).length

    test 'create from ArrayBuffer', ->
        buf = new AV.Buffer(new ArrayBuffer(9))
        assert.equal 9, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 9, buf.data.length
        assert.deepEqual buf, new AV.Buffer(new Uint8Array(9))

    test 'create from typed array', ->
        # non-byte typed arrays are viewed byte-wise: 9 Uint32s = 36 bytes
        buf = new AV.Buffer(new Uint32Array(9))
        assert.equal 36, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 36, buf.data.length
        assert.deepEqual buf, new AV.Buffer(new Uint8Array(36))

    test 'create from sliced typed array', ->
        # a subarray view must preserve its byteOffset and byteLength
        buf = new AV.Buffer(new Uint32Array(9).subarray(2, 6))
        assert.equal 16, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 16, buf.data.length
        assert.deepEqual buf, new AV.Buffer(new Uint8Array(new ArrayBuffer(36), 8, 16))

    test 'create from array', ->
        buf = new AV.Buffer([1,2,3,4,5,6,7,8,9])
        assert.equal 9, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 9, buf.data.length
        assert.deepEqual buf, new AV.Buffer(new Uint8Array([1,2,3,4,5,6,7,8,9]))

    test 'create from number', ->
        # a numeric argument allocates a zero-filled buffer of that size
        buf = new AV.Buffer(9)
        assert.equal 9, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 9, buf.data.length
        assert.deepEqual buf, new AV.Buffer(new Uint8Array(9))

    test 'create from another AV.Buffer', ->
        buf = new AV.Buffer(new AV.Buffer(9))
        assert.equal 9, buf.length
        assert.ok buf.data instanceof Uint8Array
        assert.equal 9, buf.data.length
        assert.deepEqual buf, new AV.Buffer(new Uint8Array(9))

    # Node's Buffer type only exists under Node.js
    if AV.isNode
        test 'create from node buffer', ->
            buf = new AV.Buffer(new Buffer([1,2,3,4,5,6,7,8,9]))
            assert.equal 9, buf.length
            assert.ok buf.data instanceof Uint8Array
            assert.equal 9, buf.data.length
            assert.deepEqual buf, new AV.Buffer(new Uint8Array([1,2,3,4,5,6,7,8,9]))

    test 'error constructing', ->
        # unsupported input types must throw rather than silently coerce
        assert.throws ->
            new AV.Buffer('some string')

        assert.throws ->
            new AV.Buffer(true)

    # Blob APIs only exist in browsers
    if Blob?
        test 'makeBlob', ->
            assert.ok AV.Buffer.makeBlob(bytes) instanceof Blob

        test 'makeBlobURL', ->
            assert.equal 'string', typeof AV.Buffer.makeBlobURL(bytes)

        test 'toBlob', ->
            assert.ok buffer.toBlob() instanceof Blob

        test 'toBlobURL', ->
            assert.equal 'string', typeof buffer.toBlobURL()
--------------------------------------------------------------------------------
/src/asset.coffee:
--------------------------------------------------------------------------------
1 | #
2 | # The Asset class is responsible for managing all aspects of the
3 | # decoding pipeline from source to decoder. You can use the Asset
4 | # class to inspect information about an audio file, such as its
5 | # format, metadata, and duration, as well as actually decode the
6 | # file to linear PCM raw audio data.
7 | #
8 |
class AV.Asset extends AV.EventEmitter
    # source - an AV.HTTPSource, AV.FileSource or AV.BufferSource whose
    #          'data' buffers are probed, demuxed and decoded
    constructor: (@source) ->
        @buffered = 0       # percent of the source buffered so far
        @duration = null    # milliseconds, once known
        @format = null
        @metadata = null
        @active = false     # whether the pipeline is currently running
        @demuxer = null
        @decoder = null

        # the very first chunk is used to sniff the container format
        @source.once 'data', @probe
        @source.on 'error', (err) =>
            @emit 'error', err
            @stop()

        @source.on 'progress', (@buffered) =>
            @emit 'buffer', @buffered

    # Convenience constructors for the three source types.
    @fromURL: (url) ->
        return new AV.Asset new AV.HTTPSource(url)

    @fromFile: (file) ->
        return new AV.Asset new AV.FileSource(file)

    @fromBuffer: (buffer) ->
        return new AV.Asset new AV.BufferSource(buffer)

    # Begin buffering (and, unless decode is false, decoding).  Calling
    # start(false) first and start() later upgrades to full decoding.
    start: (decode) ->
        return if @active

        @shouldDecode = decode if decode?
        @shouldDecode ?= true

        @active = true
        @source.start()

        # resume decoding if a decoder already exists from an earlier run
        if @decoder and @shouldDecode
            @_decode()

    # Pause the pipeline; start() resumes it.
    stop: ->
        return unless @active

        @active = false
        @source.pause()

    # One-shot getter for 'format', 'duration' or 'metadata': invokes the
    # callback immediately when already known, otherwise runs the pipeline
    # just long enough to find out and then stops it again.
    get: (event, callback) ->
        return unless event in ['format', 'duration', 'metadata']

        if this[event]?
            callback(this[event])
        else
            @once event, (value) =>
                @stop()
                callback(value)

            @start()

    # Decode a single packet (used for pull-based decoding).
    decodePacket: ->
        @decoder.decode()

    # Decode the entire asset and deliver one concatenated Float32Array.
    decodeToBuffer: (callback) ->
        length = 0
        chunks = []
        @on 'data', dataHandler = (chunk) ->
            length += chunk.length
            chunks.push chunk

        @once 'end', ->
            buf = new Float32Array(length)
            offset = 0

            for chunk in chunks
                buf.set(chunk, offset)
                offset += chunk.length

            # NOTE(review): this thin-arrow handler relies on the emitter
            # invoking it with `this` bound to the asset so @off resolves —
            # confirm against AV.EventEmitter's dispatch
            @off 'data', dataHandler
            callback(buf)

        @start()

    # Handler for the first source chunk: pick a demuxer by sniffing the
    # container, then wire its events through to this asset.
    probe: (chunk) =>
        return unless @active

        demuxer = AV.Demuxer.find(chunk)
        if not demuxer
            return @emit 'error', 'A demuxer for this container was not found.'

        @demuxer = new demuxer(@source, chunk)
        @demuxer.on 'format', @findDecoder

        @demuxer.on 'duration', (@duration) =>
            @emit 'duration', @duration

        @demuxer.on 'metadata', (@metadata) =>
            @emit 'metadata', @metadata

        @demuxer.on 'error', (err) =>
            @emit 'error', err
            @stop()

    # Once the demuxer reports a format, locate a matching decoder and
    # normalize its output to Float32 samples.
    findDecoder: (@format) =>
        return unless @active

        @emit 'format', @format

        decoder = AV.Decoder.find(@format.formatID)
        if not decoder
            return @emit 'error', "A decoder for #{@format.formatID} was not found."

        @decoder = new decoder(@demuxer, @format)

        if @format.floatingPoint
            # float decoders already produce normalized output
            @decoder.on 'data', (buffer) =>
                @emit 'data', buffer
        else
            # scale signed integers by 2^(bits-1) to reach [-1, 1)
            div = Math.pow(2, @format.bitsPerChannel - 1)
            @decoder.on 'data', (buffer) =>
                buf = new Float32Array(buffer.length)
                for sample, i in buffer
                    buf[i] = sample / div

                @emit 'data', buf

        @decoder.on 'error', (err) =>
            @emit 'error', err
            @stop()

        @decoder.on 'end', =>
            @emit 'end'

        @emit 'decodeStart'
        @_decode() if @shouldDecode

    # Decode packets until the decoder stalls (needs more data) or the
    # asset is stopped, then re-arm on the decoder's next 'data' event.
    _decode: =>
        continue while @decoder.decode() and @active
        @decoder.once 'data', @_decode if @active
--------------------------------------------------------------------------------
/src/player.coffee:
--------------------------------------------------------------------------------
1 | #
2 | # The Player class plays back audio data from various sources
3 | # as decoded by the Asset class. In addition, it handles
4 | # common audio filters like panning and volume adjustment,
5 | # and interfacing with AudioDevices to keep track of the
6 | # playback time.
7 | #
8 |
class AV.Player extends AV.EventEmitter
    # asset - the AV.Asset to play back
    constructor: (@asset) ->
        @playing = false
        @buffered = 0
        @currentTime = 0    # milliseconds
        @duration = 0       # milliseconds
        @volume = 100       # 0..100
        @pan = 0 # -50 for left, 50 for right, 0 for center
        @metadata = {}

        # filters run over every refilled output buffer, in order
        @filters = [
            new AV.VolumeFilter(this, 'volume')
            new AV.BalanceFilter(this, 'pan')
        ]

        @asset.on 'buffer', (@buffered) =>
            @emit 'buffer', @buffered

        # once decoding begins, queue decoded frames and start the audio
        # device when enough are buffered
        @asset.on 'decodeStart', =>
            @queue = new AV.Queue(@asset)
            @queue.once 'ready', @startPlaying

        @asset.on 'format', (@format) =>
            @emit 'format', @format

        @asset.on 'metadata', (@metadata) =>
            @emit 'metadata', @metadata

        @asset.on 'duration', (@duration) =>
            @emit 'duration', @duration

        @asset.on 'error', (error) =>
            @emit 'error', error

    # Convenience constructors mirroring AV.Asset's.
    @fromURL: (url) ->
        return new AV.Player AV.Asset.fromURL(url)

    @fromFile: (file) ->
        return new AV.Player AV.Asset.fromFile(file)

    @fromBuffer: (buffer) ->
        return new AV.Player AV.Asset.fromBuffer(buffer)

    # Begin buffering/decoding without starting audible playback.
    preload: ->
        return unless @asset

        @startedPreloading = true
        @asset.start(false)

    # Start (or resume) playback, preloading first if necessary.
    play: ->
        return if @playing

        unless @startedPreloading
            @preload()

        @playing = true
        @device?.start()

    pause: ->
        return unless @playing

        @playing = false
        @device?.stop()

    togglePlayback: ->
        if @playing
            @pause()
        else
            @play()

    # Stop playback entirely and tear down the audio device.
    stop: ->
        @pause()
        @asset.stop()
        @device?.destroy()

    # Seek to `timestamp` (milliseconds); returns the actual time seeked
    # to, which may differ due to packet boundaries.
    # NOTE(review): assumes @queue and @format exist, i.e. decoding has
    # started — confirm callers never seek before 'decodeStart'
    seek: (timestamp) ->
        @device?.stop()
        # resume output once the queue has refilled at the new position
        @queue.once 'ready', =>
            @device?.seek @currentTime
            @device?.start() if @playing

        # convert timestamp to sample number
        timestamp = (timestamp / 1000) * @format.sampleRate

        # the actual timestamp we seeked to may differ
        # from the requested timestamp due to optimizations
        timestamp = @asset.decoder.seek(timestamp)

        # convert back from samples to milliseconds
        @currentTime = timestamp / @format.sampleRate * 1000 | 0

        @queue.reset()
        return @currentTime

    # Queue 'ready' handler: create the output device and feed it frames
    # from the queue on every 'refill' callback.
    startPlaying: =>
        frame = @queue.read()
        frameOffset = 0     # read position within the current frame

        @device = new AV.AudioDevice(@format.sampleRate, @format.channelsPerFrame)
        @device.on 'timeUpdate', (@currentTime) =>
            @emit 'progress', @currentTime

        @refill = (buffer) =>
            return unless @playing

            # try reading another frame if one isn't already available
            # happens when we play to the end and then seek back
            if not frame
                frame = @queue.read()
                frameOffset = 0

            # copy decoded samples into the device's output buffer,
            # pulling new frames from the queue as each is exhausted
            bufferOffset = 0
            while frame and bufferOffset < buffer.length
                max = Math.min(frame.length - frameOffset, buffer.length - bufferOffset)
                for i in [0...max] by 1
                    buffer[bufferOffset++] = frame[frameOffset++]

                if frameOffset is frame.length
                    frame = @queue.read()
                    frameOffset = 0

            # run any applied filters
            for filter in @filters
                filter.process(buffer)

            # if we've run out of data, pause the player
            unless frame
                # if this was the end of the track, make
                # sure the currentTime reflects that
                if @queue.ended
                    @currentTime = @duration
                    @emit 'progress', @currentTime
                    @emit 'end'
                    @stop()
                else
                    # if we ran out of data in the middle of
                    # the track, stop the timer but don't change
                    # the playback state
                    @device.stop()

            return

        @device.on 'refill', @refill
        @device.start() if @playing
        @emit 'ready'
--------------------------------------------------------------------------------
/tests/core/bufferlist.coffee:
--------------------------------------------------------------------------------
module 'core/bufferlist', ->
    # Shared helper: verify the list's bookkeeping counters in one call.
    # Argument order mirrors the original assertions (expected first).
    verifyCounts = (list, total, available, bytes) ->
        assert.equal total, list.numBuffers
        assert.equal available, list.availableBuffers
        assert.equal bytes, list.availableBytes

    test 'append', ->
        list = new AV.BufferList
        bufA = new AV.Buffer(new Uint8Array([1, 2, 3]))
        list.append bufA

        verifyCounts list, 1, 1, 3
        assert.equal bufA, list.first
        assert.equal bufA, list.last
        assert.equal null, bufA.prev
        assert.equal null, bufA.next

        bufB = new AV.Buffer(new Uint8Array([4, 5, 6]))
        list.append bufB

        verifyCounts list, 2, 2, 6
        assert.equal bufA, list.first
        assert.equal bufB, list.last

        assert.equal null, bufA.prev
        assert.equal bufB, bufA.next
        assert.equal bufA, bufB.prev
        assert.equal null, bufB.next

    test 'advance', ->
        list = new AV.BufferList
        bufA = AV.Buffer.allocate(3)
        bufB = AV.Buffer.allocate(3)
        list.append bufA
        list.append bufB

        verifyCounts list, 2, 2, 6
        assert.equal bufA, list.first

        assert.equal true, list.advance()
        verifyCounts list, 2, 1, 3
        assert.equal bufB, list.first

        assert.equal false, list.advance()
        assert.equal null, list.first
        verifyCounts list, 2, 0, 0

    test 'rewind', ->
        list = new AV.BufferList
        bufA = AV.Buffer.allocate(3)
        bufB = AV.Buffer.allocate(3)
        list.append bufA
        list.append bufB

        verifyCounts list, 2, 2, 6

        assert.equal true, list.advance()
        assert.equal bufB, list.first
        verifyCounts list, 2, 1, 3

        assert.equal true, list.rewind()
        assert.equal bufA, list.first
        verifyCounts list, 2, 2, 6

        # can't rewind anymore so nothing should change
        assert.equal false, list.rewind()
        assert.equal bufA, list.first
        verifyCounts list, 2, 2, 6

        # advancing past the end of the list and then rewinding should give us the last buffer
        assert.equal true, list.advance()
        assert.equal false, list.advance()
        assert.equal null, list.first
        verifyCounts list, 2, 0, 0

        assert.equal true, list.rewind()
        assert.equal bufB, list.first
        verifyCounts list, 2, 1, 3

    test 'reset', ->
        list = new AV.BufferList
        bufA = AV.Buffer.allocate(3)
        bufB = AV.Buffer.allocate(3)
        bufC = AV.Buffer.allocate(3)
        list.append bufA
        list.append bufB
        list.append bufC

        assert.equal bufA, list.first
        verifyCounts list, 3, 3, 9

        assert.equal true, list.advance()
        assert.equal bufB, list.first
        verifyCounts list, 3, 2, 6

        assert.equal true, list.advance()
        assert.equal bufC, list.first
        verifyCounts list, 3, 1, 3

        list.reset()
        assert.equal bufA, list.first
        verifyCounts list, 3, 3, 9

    test 'copy', ->
        list = new AV.BufferList
        buf = AV.Buffer.allocate(3)
        list.append buf

        duplicate = list.copy()

        assert.equal 1, list.numBuffers, duplicate.numBuffers
        assert.equal list.availableBuffers, duplicate.availableBuffers
        assert.equal list.availableBytes, duplicate.availableBytes
        assert.equal list.first, duplicate.first
--------------------------------------------------------------------------------
/tests/qunit/qunit.css:
--------------------------------------------------------------------------------
1 | /**
2 | * QUnit v1.10.0 - A JavaScript Unit Testing Framework
3 | *
4 | * http://qunitjs.com
5 | *
6 | * Copyright 2012 jQuery Foundation and other contributors
7 | * Released under the MIT license.
8 | * http://jquery.org/license
9 | */
10 |
11 | /** Font Family and Sizes */
12 |
13 | #qunit-tests, #qunit-header, #qunit-banner, #qunit-testrunner-toolbar, #qunit-userAgent, #qunit-testresult {
14 | font-family: "Helvetica Neue Light", "HelveticaNeue-Light", "Helvetica Neue", Calibri, Helvetica, Arial, sans-serif;
15 | }
16 |
17 | #qunit-testrunner-toolbar, #qunit-userAgent, #qunit-testresult, #qunit-tests li { font-size: small; }
18 | #qunit-tests { font-size: smaller; }
19 |
20 |
21 | /** Resets */
22 |
23 | #qunit-tests, #qunit-tests ol, #qunit-header, #qunit-banner, #qunit-userAgent, #qunit-testresult, #qunit-modulefilter {
24 | margin: 0;
25 | padding: 0;
26 | }
27 |
28 |
29 | /** Header */
30 |
31 | #qunit-header {
32 | padding: 0.5em 0 0.5em 1em;
33 |
34 | color: #8699a4;
35 | background-color: #0d3349;
36 |
37 | font-size: 1.5em;
38 | line-height: 1em;
39 | font-weight: normal;
40 |
41 | border-radius: 5px 5px 0 0;
42 | -moz-border-radius: 5px 5px 0 0;
43 | -webkit-border-top-right-radius: 5px;
44 | -webkit-border-top-left-radius: 5px;
45 | }
46 |
47 | #qunit-header a {
48 | text-decoration: none;
49 | color: #c2ccd1;
50 | }
51 |
52 | #qunit-header a:hover,
53 | #qunit-header a:focus {
54 | color: #fff;
55 | }
56 |
57 | #qunit-testrunner-toolbar label {
58 | display: inline-block;
59 | padding: 0 .5em 0 .1em;
60 | }
61 |
62 | #qunit-banner {
63 | height: 5px;
64 | }
65 |
66 | #qunit-testrunner-toolbar {
67 | padding: 0.5em 0 0.5em 2em;
68 | color: #5E740B;
69 | background-color: #eee;
70 | overflow: hidden;
71 | }
72 |
73 | #qunit-userAgent {
74 | padding: 0.5em 0 0.5em 2.5em;
75 | background-color: #2b81af;
76 | color: #fff;
77 | text-shadow: rgba(0, 0, 0, 0.5) 2px 2px 1px;
78 | }
79 |
80 | #qunit-modulefilter-container {
81 | float: right;
82 | }
83 |
84 | /** Tests: Pass/Fail */
85 |
86 | #qunit-tests {
87 | list-style-position: inside;
88 | }
89 |
90 | #qunit-tests li {
91 | padding: 0.4em 0.5em 0.4em 2.5em;
92 | border-bottom: 1px solid #fff;
93 | list-style-position: inside;
94 | }
95 |
96 | #qunit-tests.hidepass li.pass, #qunit-tests.hidepass li.running {
97 | display: none;
98 | }
99 |
100 | #qunit-tests li strong {
101 | cursor: pointer;
102 | }
103 |
104 | #qunit-tests li a {
105 | padding: 0.5em;
106 | color: #c2ccd1;
107 | text-decoration: none;
108 | }
109 | #qunit-tests li a:hover,
110 | #qunit-tests li a:focus {
111 | color: #000;
112 | }
113 |
114 | #qunit-tests ol {
115 | margin-top: 0.5em;
116 | padding: 0.5em;
117 |
118 | background-color: #fff;
119 |
120 | border-radius: 5px;
121 | -moz-border-radius: 5px;
122 | -webkit-border-radius: 5px;
123 | }
124 |
125 | #qunit-tests table {
126 | border-collapse: collapse;
127 | margin-top: .2em;
128 | }
129 |
130 | #qunit-tests th {
131 | text-align: right;
132 | vertical-align: top;
133 | padding: 0 .5em 0 0;
134 | }
135 |
136 | #qunit-tests td {
137 | vertical-align: top;
138 | }
139 |
140 | #qunit-tests pre {
141 | margin: 0;
142 | white-space: pre-wrap;
143 | word-wrap: break-word;
144 | }
145 |
146 | #qunit-tests del {
147 | background-color: #e0f2be;
148 | color: #374e0c;
149 | text-decoration: none;
150 | }
151 |
152 | #qunit-tests ins {
153 | background-color: #ffcaca;
154 | color: #500;
155 | text-decoration: none;
156 | }
157 |
158 | /*** Test Counts */
159 |
160 | #qunit-tests b.counts { color: black; }
161 | #qunit-tests b.passed { color: #5E740B; }
162 | #qunit-tests b.failed { color: #710909; }
163 |
164 | #qunit-tests li li {
165 | padding: 5px;
166 | background-color: #fff;
167 | border-bottom: none;
168 | list-style-position: inside;
169 | }
170 |
171 | /*** Passing Styles */
172 |
173 | #qunit-tests li li.pass {
174 | color: #3c510c;
175 | background-color: #fff;
176 | border-left: 10px solid #C6E746;
177 | }
178 |
179 | #qunit-tests .pass { color: #528CE0; background-color: #D2E0E6; }
180 | #qunit-tests .pass .test-name { color: #366097; }
181 |
182 | #qunit-tests .pass .test-actual,
183 | #qunit-tests .pass .test-expected { color: #999999; }
184 |
185 | #qunit-banner.qunit-pass { background-color: #C6E746; }
186 |
187 | /*** Failing Styles */
188 |
189 | #qunit-tests li li.fail {
190 | color: #710909;
191 | background-color: #fff;
192 | border-left: 10px solid #EE5757;
193 | white-space: pre;
194 | }
195 |
196 | #qunit-tests > li:last-child {
197 | border-radius: 0 0 5px 5px;
198 | -moz-border-radius: 0 0 5px 5px;
199 | -webkit-border-bottom-right-radius: 5px;
200 | -webkit-border-bottom-left-radius: 5px;
201 | }
202 |
203 | #qunit-tests .fail { color: #000000; background-color: #EE5757; }
204 | #qunit-tests .fail .test-name,
205 | #qunit-tests .fail .module-name { color: #000000; }
206 |
207 | #qunit-tests .fail .test-actual { color: #EE5757; }
208 | #qunit-tests .fail .test-expected { color: green; }
209 |
210 | #qunit-banner.qunit-fail { background-color: #EE5757; }
211 |
212 |
213 | /** Result */
214 |
215 | #qunit-testresult {
216 | padding: 0.5em 0.5em 0.5em 2.5em;
217 |
218 | color: #2b81af;
219 | background-color: #D2E0E6;
220 |
221 | border-bottom: 1px solid white;
222 | }
223 | #qunit-testresult .module-name {
224 | font-weight: bold;
225 | }
226 |
227 | /** Fixture */
228 |
229 | #qunit-fixture {
230 | position: absolute;
231 | top: -10000px;
232 | left: -10000px;
233 | width: 1000px;
234 | height: 1000px;
235 | }
--------------------------------------------------------------------------------
/src/demuxers/caf.coffee:
--------------------------------------------------------------------------------
class CAFDemuxer extends AV.Demuxer
    AV.Demuxer.register(CAFDemuxer)

    # A CAF (Core Audio Format) file always begins with the 'caff' magic.
    @probe: (buffer) ->
        return buffer.peekString(0, 4) is 'caff'

    # Incremental parser, called whenever more data is buffered.  Parses
    # the file header plus the mandatory 'desc' chunk once, then consumes
    # chunks one at a time, caching the current chunk header in
    # @headerCache so a chunk can straddle multiple reads.
    readChunk: ->
        # 64 bytes comfortably covers the 8-byte file header plus the
        # 12-byte chunk header and 32-byte body of the 'desc' chunk
        if not @format and @stream.available(64)
            if @stream.readString(4) isnt 'caff'
                return @emit 'error', "Invalid CAF, does not begin with 'caff'"

            # skip version and flags
            @stream.advance(4)

            if @stream.readString(4) isnt 'desc'
                return @emit 'error', "Invalid CAF, 'caff' is not followed by 'desc'"

            # chunk sizes are 64-bit, read as two 32-bit halves
            unless @stream.readUInt32() is 0 and @stream.readUInt32() is 32
                return @emit 'error', "Invalid 'desc' size, should be 32"

            @format = {}
            @format.sampleRate = @stream.readFloat64()
            @format.formatID = @stream.readString(4)

            flags = @stream.readUInt32()
            # for linear PCM the format flags encode float/endianness
            if @format.formatID is 'lpcm'
                @format.floatingPoint = Boolean(flags & 1)
                @format.littleEndian = Boolean(flags & 2)

            @format.bytesPerPacket = @stream.readUInt32()
            @format.framesPerPacket = @stream.readUInt32()
            @format.channelsPerFrame = @stream.readUInt32()
            @format.bitsPerChannel = @stream.readUInt32()

            @emit 'format', @format

        while @stream.available(1)
            # read the next chunk header (type + 64-bit size) unless we are
            # mid-chunk from a previous call
            unless @headerCache
                @headerCache =
                    type: @stream.readString(4)
                    oversize: @stream.readUInt32() isnt 0   # high 32 bits of the size
                    size: @stream.readUInt32()

            # sizes above 2^32 can't be handled with 32-bit arithmetic here
            if @headerCache.oversize
                return @emit 'error', "Holy Shit, an oversized file, not supported in JS"

            switch @headerCache.type
                # magic cookie: codec configuration data
                when 'kuki'
                    if @stream.available(@headerCache.size)
                        if @format.formatID is 'aac ' # variations needed?
                            offset = @stream.offset + @headerCache.size
                            # AAC cookies are wrapped in an MPEG-4 esds box
                            if cookie = M4ADemuxer.readEsds(@stream)
                                @emit 'cookie', cookie

                            @stream.seek offset # skip extra garbage

                        else
                            buffer = @stream.readBuffer(@headerCache.size)
                            @emit 'cookie', buffer

                        @headerCache = null

                # packet table: per-packet byte/frame counts for VBR data
                when 'pakt'
                    if @stream.available(@headerCache.size)
                        if @stream.readUInt32() isnt 0
                            return @emit 'error', 'Sizes greater than 32 bits are not supported.'

                        @numPackets = @stream.readUInt32()

                        if @stream.readUInt32() isnt 0
                            return @emit 'error', 'Sizes greater than 32 bits are not supported.'

                        @numFrames = @stream.readUInt32()
                        @primingFrames = @stream.readUInt32()
                        @remainderFrames = @stream.readUInt32()

                        @emit 'duration', @numFrames / @format.sampleRate * 1000 | 0
                        @sentDuration = true

                        # build the seek table: constant-rate formats use the
                        # fixed packet sizes, otherwise variable-length
                        # descriptors are read from the packet table
                        byteOffset = 0
                        sampleOffset = 0
                        for i in [0...@numPackets] by 1
                            @addSeekPoint byteOffset, sampleOffset
                            byteOffset += @format.bytesPerPacket or M4ADemuxer.readDescrLen(@stream)
                            sampleOffset += @format.framesPerPacket or M4ADemuxer.readDescrLen(@stream)

                        @headerCache = null

                # key/value metadata strings
                when 'info'
                    entries = @stream.readUInt32()
                    metadata = {}

                    # NOTE(review): unlike 'kuki'/'pakt' this case does not
                    # check @stream.available(@headerCache.size) first, so
                    # an underflow mid-chunk would surface from the string
                    # reads — confirm the stream's underflow handling
                    for i in [0...entries]
                        # null terminated strings
                        key = @stream.readString(null)
                        value = @stream.readString(null)
                        metadata[key] = value

                    @emit 'metadata', metadata
                    @headerCache = null

                # the audio data itself
                when 'data'
                    unless @sentFirstDataChunk
                        # skip edit count
                        @stream.advance(4)
                        @headerCache.size -= 4

                        # calculate the duration based on bytes per packet if no packet table
                        if @format.bytesPerPacket isnt 0 and not @sentDuration
                            @numFrames = @headerCache.size / @format.bytesPerPacket
                            @emit 'duration', @numFrames / @format.sampleRate * 1000 | 0

                        @sentFirstDataChunk = true

                    # emit whatever portion of the chunk is buffered so far
                    buffer = @stream.readSingleBuffer(@headerCache.size)
                    @headerCache.size -= buffer.length
                    @emit 'data', buffer

                    if @headerCache.size <= 0
                        @headerCache = null

                # unknown chunk: skip it entirely once fully buffered
                else
                    if @stream.available(@headerCache.size)
                        @stream.advance(@headerCache.size)
                        @headerCache = null

        return
--------------------------------------------------------------------------------
/src/core/bitstream.coffee:
--------------------------------------------------------------------------------
# Bit-level reader layered over an AV.Stream.  read/peek extract bits
# MSB-first; the LSB variants extract least-significant-bit first.
class AV.Bitstream
    # stream - the underlying AV.Stream supplying bytes
    constructor: (@stream) ->
        # bit offset (0-7) within the stream's current byte
        @bitPosition = 0
4 |
5 | copy: ->
6 | result = new AV.Bitstream @stream.copy()
7 | result.bitPosition = @bitPosition
8 | return result
9 |
10 | offset: -> # Should be a property
11 | return 8 * @stream.offset + @bitPosition
12 |
    # Whether at least `bits` more bits can be read.
    # NOTE(review): the formula (bits + 8 - bitPosition) / 8 usually
    # over-requests by up to a byte, but for a small read that crosses a
    # byte boundary when bitPosition > 4 (e.g. bits=4 at bitPosition=5) it
    # can under-request — confirm callers don't rely on it near the end of
    # a stream before tightening to ceil((bits + bitPosition) / 8).
    available: (bits) ->
        return @stream.available((bits + 8 - @bitPosition) / 8)
15 |
16 | advance: (bits) ->
17 | pos = @bitPosition + bits
18 | @stream.advance(pos >> 3)
19 | @bitPosition = pos & 7
20 |
21 | rewind: (bits) ->
22 | pos = @bitPosition - bits
23 | @stream.rewind(Math.abs(pos >> 3))
24 | @bitPosition = pos & 7
25 |
26 | seek: (offset) ->
27 | curOffset = @offset()
28 |
29 | if offset > curOffset
30 | @advance offset - curOffset
31 |
32 | else if offset < curOffset
33 | @rewind curOffset - offset
34 |
35 | align: ->
36 | unless @bitPosition is 0
37 | @bitPosition = 0
38 | @stream.advance(1)
39 |
40 | read: (bits, signed) ->
41 | return 0 if bits is 0
42 |
43 | mBits = bits + @bitPosition
44 | if mBits <= 8
45 | a = ((@stream.peekUInt8() << @bitPosition) & 0xff) >>> (8 - bits)
46 |
47 | else if mBits <= 16
48 | a = ((@stream.peekUInt16() << @bitPosition) & 0xffff) >>> (16 - bits)
49 |
50 | else if mBits <= 24
51 | a = ((@stream.peekUInt24() << @bitPosition) & 0xffffff) >>> (24 - bits)
52 |
53 | else if mBits <= 32
54 | a = (@stream.peekUInt32() << @bitPosition) >>> (32 - bits)
55 |
56 | else if mBits <= 40
57 | a0 = @stream.peekUInt8(0) * 0x0100000000 # same as a << 32
58 | a1 = @stream.peekUInt8(1) << 24 >>> 0
59 | a2 = @stream.peekUInt8(2) << 16
60 | a3 = @stream.peekUInt8(3) << 8
61 | a4 = @stream.peekUInt8(4)
62 |
63 | a = a0 + a1 + a2 + a3 + a4
64 | a %= Math.pow(2, 40 - @bitPosition) # (a << bitPosition) & 0xffffffffff
65 | a = Math.floor(a / Math.pow(2, 40 - @bitPosition - bits)) # a >>> (40 - bits)
66 |
67 | else
68 | throw new Error "Too many bits!"
69 |
70 | if signed
71 | # if the sign bit is turned on, flip the bits and
72 | # add one to convert to a negative value
73 | if mBits < 32
74 | if a >>> (bits - 1)
75 | a = ((1 << bits >>> 0) - a) * -1
76 | else
77 | if a / Math.pow(2, bits - 1) | 0
78 | a = (Math.pow(2, bits) - a) * -1
79 |
80 | @advance bits
81 | return a
82 |
    # Return the next `bits` (0-40) MSB-first without advancing.
    #
    # bits   - number of bits to extract (throws above 40)
    # signed - when true, interpret the value as two's complement
    peek: (bits, signed) ->
        return 0 if bits is 0

        # total span including the current intra-byte offset determines
        # how many bytes must be peeked from the stream
        mBits = bits + @bitPosition
        if mBits <= 8
            a = ((@stream.peekUInt8() << @bitPosition) & 0xff) >>> (8 - bits)

        else if mBits <= 16
            a = ((@stream.peekUInt16() << @bitPosition) & 0xffff) >>> (16 - bits)

        else if mBits <= 24
            a = ((@stream.peekUInt24() << @bitPosition) & 0xffffff) >>> (24 - bits)

        else if mBits <= 32
            a = (@stream.peekUInt32() << @bitPosition) >>> (32 - bits)

        else if mBits <= 40
            # beyond 32 bits JavaScript bitwise operators overflow, so the
            # value is assembled with floating-point arithmetic instead
            a0 = @stream.peekUInt8(0) * 0x0100000000 # same as a << 32
            a1 = @stream.peekUInt8(1) << 24 >>> 0
            a2 = @stream.peekUInt8(2) << 16
            a3 = @stream.peekUInt8(3) << 8
            a4 = @stream.peekUInt8(4)

            a = a0 + a1 + a2 + a3 + a4
            a %= Math.pow(2, 40 - @bitPosition) # (a << bitPosition) & 0xffffffffff
            a = Math.floor(a / Math.pow(2, 40 - @bitPosition - bits)) # a >>> (40 - bits)

        else
            throw new Error "Too many bits!"

        if signed
            # if the sign bit is turned on, flip the bits and
            # add one to convert to a negative value
            if mBits < 32
                if a >>> (bits - 1)
                    a = ((1 << bits >>> 0) - a) * -1
            else
                # float-arithmetic path for wide values where the bitwise
                # operators above would overflow
                if a / Math.pow(2, bits - 1) | 0
                    a = (Math.pow(2, bits) - a) * -1

        return a
124 |
125 | readLSB: (bits, signed) ->
126 | return 0 if bits is 0
127 | if bits > 40
128 | throw new Error "Too many bits!"
129 |
130 | mBits = bits + @bitPosition
131 | a = (@stream.peekUInt8(0)) >>> @bitPosition
132 | a |= (@stream.peekUInt8(1)) << (8 - @bitPosition) if mBits > 8
133 | a |= (@stream.peekUInt8(2)) << (16 - @bitPosition) if mBits > 16
134 | a += (@stream.peekUInt8(3)) << (24 - @bitPosition) >>> 0 if mBits > 24
135 | a += (@stream.peekUInt8(4)) * Math.pow(2, 32 - @bitPosition) if mBits > 32
136 |
137 | if mBits >= 32
138 | a %= Math.pow(2, bits)
139 | else
140 | a &= (1 << bits) - 1
141 |
142 | if signed
143 | # if the sign bit is turned on, flip the bits and
144 | # add one to convert to a negative value
145 | if mBits < 32
146 | if a >>> (bits - 1)
147 | a = ((1 << bits >>> 0) - a) * -1
148 | else
149 | if a / Math.pow(2, bits - 1) | 0
150 | a = (Math.pow(2, bits) - a) * -1
151 |
152 | @advance bits
153 | return a
154 |
    # Returns the next `bits` bits of the stream assembled least-
    # significant byte first, without advancing the read position.
    #
    # bits   - number of bits to peek; at most 40 (more throws)
    # signed - when truthy, interpret the result as two's complement
    peekLSB: (bits, signed) ->
        return 0 if bits is 0
        if bits > 40
            throw new Error "Too many bits!"

        # total span in bits, counting the bit offset into the first byte
        mBits = bits + @bitPosition
        a = (@stream.peekUInt8(0)) >>> @bitPosition
        a |= (@stream.peekUInt8(1)) << (8 - @bitPosition) if mBits > 8
        a |= (@stream.peekUInt8(2)) << (16 - @bitPosition) if mBits > 16
        # Bytes 4 and 5 can push the value past the signed 32-bit range of
        # the bitwise operators, so switch to addition: `>>> 0` keeps the
        # fourth byte's contribution unsigned, and the fifth is scaled
        # with float arithmetic.
        a += (@stream.peekUInt8(3)) << (24 - @bitPosition) >>> 0 if mBits > 24
        a += (@stream.peekUInt8(4)) * Math.pow(2, 32 - @bitPosition) if mBits > 32

        if mBits >= 32
            # (1 << bits) would overflow here, so mask with modulo instead
            a %= Math.pow(2, bits)
        else
            a &= (1 << bits) - 1

        if signed
            # if the sign bit is turned on, flip the bits and
            # add one to convert to a negative value
            if mBits < 32
                if a >>> (bits - 1)
                    a = ((1 << bits >>> 0) - a) * -1
            else
                if a / Math.pow(2, bits - 1) | 0
                    a = (Math.pow(2, bits) - a) * -1

        return a
--------------------------------------------------------------------------------
/tests/demuxers/m4a.coffee:
--------------------------------------------------------------------------------
#import "shared.coffee"

# Demuxer tests for the MP4/M4A container.  Each demuxerTest case names a
# fixture file plus the expected format fields, duration, metadata tags,
# chapter list, and a short hex checksum ('data') of the demuxed output
# (presumably CRC-32 — the tests tree ships a crc32.coffee; verify there).
# Chapter timestamps/durations are in milliseconds (the '10 Seconds'
# chapter sits at timestamp 10000).
module 'demuxers/m4a', ->
    demuxerTest 'base',
        file: 'm4a/base.m4a'
        format:
            formatID: 'mp4a'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false
        duration: 38707
        metadata:
            title: 'base'
            album: 'Test Album'
            artist: 'AAC.js'
            comments: 'This is a test description.'
            composer: 'Devon Govett'
            encoder: 'GarageBand 6.0.5'
        data: '89f4b24e'

    # Same audio as 'base', but the moov atom is placed after the mdat —
    # exercises the demuxer's handling of trailing metadata.
    demuxerTest 'moov atom at end',
        file: 'm4a/moov_end.m4a'
        format:
            formatID: 'mp4a'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false
        duration: 38707
        metadata:
            title: 'moov_end'
            album: 'Test Album'
            artist: 'AAC.js'
            comments: 'This is a test description.'
            composer: 'Devon Govett'
            encoder: 'GarageBand 6.0.5'
            rating: 'None'
        data: '89f4b24e'

    # Exercises the full iTunes-style metadata tag set.
    demuxerTest 'metadata',
        file: 'm4a/metadata.m4a'
        format:
            formatID: 'mp4a'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false
        duration: 38707
        metadata:
            album: "Album"
            albumArtist: "Album Artist"
            artist: "Artist"
            category: "Category"
            comments: "Comments"
            composer: "Composer"
            coverArt: '4b87a08c'
            copyright: "© Copyright"
            description: "Description"
            diskNumber: "1 of 0"
            encoder: "Encoding Tool"
            genre: "Custom Genre"
            grouping: "Grouping"
            keywords: "Keywords"
            longDescription: "Long Description"
            lyrics: "Lyrics"
            rating: "Clean"
            releaseDate: "Release Date"
            tempo: 100
            title: "Name"
            trackNumber: "1 of 0"
        data: '89f4b24e'

    # Chapter-track variants: same chapter list expected regardless of
    # whether the chapter track also carries images and/or URLs.
    demuxerTest 'text+image chapters',
        file: 'm4a/chapters.m4a'
        duration: 38707
        data: '263ad71d'
        chapters: [
            { title: 'Start', timestamp: 0, duration: 10000 }
            { title: '10 Seconds', timestamp: 10000, duration: 15000 }
            { title: '25 Seconds', timestamp: 25000, duration: 13706 }
        ]

    demuxerTest 'text chapters',
        file: 'm4a/chapters2.m4a'
        data: '263ad71d'
        chapters: [
            { title: 'Start', timestamp: 0, duration: 10000 }
            { title: '10 Seconds', timestamp: 10000, duration: 15000 }
            { title: '25 Seconds', timestamp: 25000, duration: 13706 }
        ]

    demuxerTest 'text+url chapters',
        file: 'm4a/chapters3.m4a'
        data: '263ad71d'
        chapters: [
            { title: 'Start', timestamp: 0, duration: 10000 }
            { title: '10 Seconds', timestamp: 10000, duration: 15000 }
            { title: '25 Seconds', timestamp: 25000, duration: 13706 }
        ]

    demuxerTest 'text+image+url chapters',
        file: 'm4a/chapters4.m4a'
        data: '263ad71d'
        chapters: [
            { title: 'Start', timestamp: 0, duration: 10000 }
            { title: '10 Seconds', timestamp: 10000, duration: 15000 }
            { title: '25 Seconds', timestamp: 25000, duration: 13706 }
        ]

    demuxerTest 'alac',
        file: 'm4a/alac.m4a'
        duration: 38659
        data: 'f685e2c0'
        format:
            formatID: 'alac'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false

    # QuickTime .mov LPCM variants: integer/float, 8/16/32-bit,
    # big/little endian.
    demuxerTest 'i8',
        file: 'm4a/i8.mov'
        duration: 8916
        data: 'f12b56ad'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 8
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false

    demuxerTest 'bei16',
        file: 'm4a/bei16.mov'
        duration: 8916
        data: 'd07573bd'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false

    demuxerTest 'lei16',
        file: 'm4a/lei16.mov'
        duration: 8916
        data: '920d2380'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 16
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: true

    demuxerTest 'bei32',
        file: 'm4a/bei32.mov'
        duration: 8916
        data: 'dbaa37f7'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false
            bytesPerFrame: 8
            framesPerPacket: 1

    demuxerTest 'lei32',
        file: 'm4a/lei32.mov'
        duration: 8916
        data: 'a4bd0fad'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: true
            bytesPerFrame: 8
            framesPerPacket: 1

    demuxerTest 'bef32',
        file: 'm4a/bef32.mov'
        duration: 8916
        data: 'e8606b84'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            floatingPoint: true
            littleEndian: false
            bytesPerFrame: 8
            framesPerPacket: 1

    demuxerTest 'lef32',
        file: 'm4a/lef32.mov'
        duration: 8916
        data: 'a41981e4'
        format:
            formatID: 'lpcm'
            sampleRate: 44100
            bitsPerChannel: 32
            channelsPerFrame: 2
            floatingPoint: true
            littleEndian: true
            bytesPerFrame: 8
            framesPerPacket: 1

    demuxerTest 'ulaw',
        file: 'm4a/ulaw.mov'
        duration: 8916
        data: '49c9d650'
        format:
            formatID: 'ulaw'
            sampleRate: 44100
            bitsPerChannel: 8
            channelsPerFrame: 2
            floatingPoint: false
            littleEndian: false
--------------------------------------------------------------------------------
/src/devices/resampler.js:
--------------------------------------------------------------------------------
1 | /*
2 | * This resampler is from XAudioJS: https://github.com/grantgalitz/XAudioJS
3 | * Planned to be replaced with src.js, eventually: https://github.com/jussi-kalliokoski/src.js
4 | */
5 |
// JavaScript Audio Resampler (c) 2011 - Grant Galitz
/**
 * Sample-rate converter for interleaved PCM data.
 *
 * @param {number} fromSampleRate   input rate in Hz
 * @param {number} toSampleRate     output rate in Hz
 * @param {number} channels         interleaved channel count (coerced to int)
 * @param {number} outputBufferSize capacity of the internal output buffer
 * @param {*}      noReturn         truthy: the resample routine fills
 *                                  this.outputBuffer in place and returns a
 *                                  sample count instead of a buffer
 */
function Resampler(fromSampleRate, toSampleRate, channels, outputBufferSize, noReturn) {
    this.noReturn = !!noReturn;
    this.channels = channels | 0;
    this.outputBufferSize = outputBufferSize;
    this.fromSampleRate = fromSampleRate;
    this.toSampleRate = toSampleRate;
    this.initialize();
}
15 |
/**
 * Validates the configuration and installs the appropriate resample
 * routine on this.resampler.
 * NOTE(review): outputBufferSize is not validated here — confirm callers
 * always pass a positive size.
 */
Resampler.prototype.initialize = function () {
    // Reject non-positive rates or channel counts up front.
    if (!(this.fromSampleRate > 0 && this.toSampleRate > 0 && this.channels > 0)) {
        throw(new Error("Invalid settings specified for the resampler."));
    }
    if (this.fromSampleRate == this.toSampleRate) {
        // Rates match: pass buffers straight through, no internal buffers needed.
        this.resampler = this.bypassResampler;
        this.ratioWeight = 1;
        return;
    }
    if (this.fromSampleRate < this.toSampleRate) {
        /*
            Use generic linear interpolation if upsampling,
            as linear interpolation produces a gradient that we want
            and works fine with two input sample points per output in this case.
        */
        this.compileLinearInterpolationFunction();
        this.lastWeight = 1;
    }
    else {
        /*
            Custom resampler I wrote that doesn't skip samples
            like standard linear interpolation in high downsampling.
            This is more accurate than linear interpolation on downsampling.
        */
        this.compileMultiTapFunction();
        this.tailExists = false;
        this.lastWeight = 0;
    }
    this.ratioWeight = this.fromSampleRate / this.toSampleRate;
    this.initializeBuffers();
};
52 |
/**
 * Builds the source text of a linear-interpolation resample routine
 * specialized for this.channels channels (the per-channel work is
 * emitted as straight-line code rather than an inner loop), then
 * compiles it with the Function constructor and installs it as
 * this.resampler(buffer).  Used on the upsampling path.
 */
Resampler.prototype.compileLinearInterpolationFunction = function () {
    // Prologue: validate buffer length, set up interpolation state, and
    // emit the "tail" loop that blends this.lastOutput (the final frame
    // of the previous call) with the first frame of this buffer.
    var toCompile = "var bufferLength = buffer.length;\
        var outLength = this.outputBufferSize;\
        if ((bufferLength % " + this.channels + ") == 0) {\
            if (bufferLength > 0) {\
                var ratioWeight = this.ratioWeight;\
                var weight = this.lastWeight;\
                var firstWeight = 0;\
                var secondWeight = 0;\
                var sourceOffset = 0;\
                var outputOffset = 0;\
                var outputBuffer = this.outputBuffer;\
                for (; weight < 1; weight += ratioWeight) {\
                    secondWeight = weight % 1;\
                    firstWeight = 1 - secondWeight;";
    for (var channel = 0; channel < this.channels; ++channel) {
        toCompile += "outputBuffer[outputOffset++] = (this.lastOutput[" + channel + "] * firstWeight) + (buffer[" + channel + "] * secondWeight);";
    }
    // Main loop: interpolate between adjacent input frames until either
    // the output buffer or the input is exhausted.
    toCompile += "}\
        weight -= 1;\
        for (bufferLength -= " + this.channels + ", sourceOffset = Math.floor(weight) * " + this.channels + "; outputOffset < outLength && sourceOffset < bufferLength;) {\
            secondWeight = weight % 1;\
            firstWeight = 1 - secondWeight;";
    for (var channel = 0; channel < this.channels; ++channel) {
        toCompile += "outputBuffer[outputOffset++] = (buffer[sourceOffset" + ((channel > 0) ? (" + " + channel) : "") + "] * firstWeight) + (buffer[sourceOffset + " + (this.channels + channel) + "] * secondWeight);";
    }
    toCompile += "weight += ratioWeight;\
        sourceOffset = Math.floor(weight) * " + this.channels + ";\
    }";
    // Epilogue: stash the final frame and fractional weight for the next
    // call, and return via bufferSlice.
    for (var channel = 0; channel < this.channels; ++channel) {
        toCompile += "this.lastOutput[" + channel + "] = buffer[sourceOffset++];";
    }
    toCompile += "this.lastWeight = weight % 1;\
        return this.bufferSlice(outputOffset);\
    }\
    else {\
        return (this.noReturn) ? 0 : [];\
    }\
}\
else {\
    throw(new Error(\"Buffer was of incorrect sample length.\"));\
}";
    // Compile the generated source; `buffer` is the sole parameter.
    this.resampler = Function("buffer", toCompile);
};
97 |
/**
 * Builds and compiles a multi-tap (box-average) downsampling routine
 * specialized for this.channels channels, installed as
 * this.resampler(buffer).  Every input sample contributes to some output
 * sample (weighted by overlap), unlike plain linear interpolation which
 * skips input at high downsampling ratios.  Partial accumulator state is
 * carried across calls via this.lastOutput / this.lastWeight /
 * this.tailExists.
 */
Resampler.prototype.compileMultiTapFunction = function () {
    var toCompile = "var bufferLength = buffer.length;\
        var outLength = this.outputBufferSize;\
        if ((bufferLength % " + this.channels + ") == 0) {\
            if (bufferLength > 0) {\
                var ratioWeight = this.ratioWeight;\
                var weight = 0;";
    // One accumulator variable per channel.
    for (var channel = 0; channel < this.channels; ++channel) {
        toCompile += "var output" + channel + " = 0;"
    }
    // Per-output-sample loop: start fresh, or resume the partial
    // accumulators left over from the previous call.
    toCompile += "var actualPosition = 0;\
        var amountToNext = 0;\
        var alreadyProcessedTail = !this.tailExists;\
        this.tailExists = false;\
        var outputBuffer = this.outputBuffer;\
        var outputOffset = 0;\
        var currentPosition = 0;\
        do {\
            if (alreadyProcessedTail) {\
                weight = ratioWeight;";
    for (channel = 0; channel < this.channels; ++channel) {
        toCompile += "output" + channel + " = 0;"
    }
    toCompile += "}\
        else {\
            weight = this.lastWeight;";
    for (channel = 0; channel < this.channels; ++channel) {
        toCompile += "output" + channel + " = this.lastOutput[" + channel + "];"
    }
    // Accumulate whole input frames while the remaining weight covers
    // them, then a fractional contribution from the last frame.
    toCompile += "alreadyProcessedTail = true;\
        }\
        while (weight > 0 && actualPosition < bufferLength) {\
            amountToNext = 1 + actualPosition - currentPosition;\
            if (weight >= amountToNext) {";
    for (channel = 0; channel < this.channels; ++channel) {
        toCompile += "output" + channel + " += buffer[actualPosition++] * amountToNext;"
    }
    toCompile += "currentPosition = actualPosition;\
        weight -= amountToNext;\
        }\
        else {";
    for (channel = 0; channel < this.channels; ++channel) {
        toCompile += "output" + channel + " += buffer[actualPosition" + ((channel > 0) ? (" + " + channel) : "") + "] * weight;"
    }
    toCompile += "currentPosition += weight;\
        weight = 0;\
        break;\
        }\
        }\
        if (weight == 0) {";
    // Weight fully consumed: emit the averaged output sample(s).
    for (channel = 0; channel < this.channels; ++channel) {
        toCompile += "outputBuffer[outputOffset++] = output" + channel + " / ratioWeight;"
    }
    // Otherwise the input ran out mid-sample: save the partial
    // accumulators for the next call.
    toCompile += "}\
        else {\
            this.lastWeight = weight;";
    for (channel = 0; channel < this.channels; ++channel) {
        toCompile += "this.lastOutput[" + channel + "] = output" + channel + ";"
    }
    toCompile += "this.tailExists = true;\
        break;\
        }\
        } while (actualPosition < bufferLength && outputOffset < outLength);\
        return this.bufferSlice(outputOffset);\
        }\
        else {\
            return (this.noReturn) ? 0 : [];\
        }\
        }\
        else {\
            throw(new Error(\"Buffer was of incorrect sample length.\"));\
        }";
    // Compile the generated source; `buffer` is the sole parameter.
    this.resampler = Function("buffer", toCompile);
};
172 |
/**
 * Resample routine used when input and output rates match: no conversion
 * is needed.  In noReturn mode the input buffer is adopted as
 * this.outputBuffer and its length is returned; otherwise the input
 * buffer is returned unchanged.
 *
 * @param {Float32Array|number[]} buffer interleaved input samples
 * @returns {Float32Array|number[]|number}
 */
Resampler.prototype.bypassResampler = function (buffer) {
    if (this.noReturn) {
        // Adopt the caller's buffer directly; report how much is in it.
        this.outputBuffer = buffer;
        return buffer.length;
    }
    // Nothing to do — hand the same buffer back.
    return buffer;
};
184 |
/**
 * Returns the first sliceAmount samples of this.outputBuffer, adapting
 * to whatever buffer type initializeBuffers managed to create.
 *
 * @param {number} sliceAmount number of valid samples produced
 * @returns {Float32Array|number[]|number} the slice, or just the count
 *          in noReturn mode (caller reads this.outputBuffer itself)
 */
Resampler.prototype.bufferSlice = function (sliceAmount) {
    if (this.noReturn) {
        // Caller accesses this.outputBuffer directly; report the length only.
        return sliceAmount;
    }
    try {
        // Typed-array path: zero-copy view.
        return this.outputBuffer.subarray(0, sliceAmount);
    }
    catch (subarrayError) {
        try {
            // Plain-array fallback: truncate in place.
            this.outputBuffer.length = sliceAmount;
            return this.outputBuffer;
        }
        catch (lengthError) {
            // Nightly Firefox 4 used to name the subarray function "slice".
            return this.outputBuffer.slice(0, sliceAmount);
        }
    }
};
208 |
/**
 * Allocates the output buffer and the per-channel carry-over frame.
 * Prefers Float32Array; falls back to plain arrays on engines without
 * typed-array support.
 */
Resampler.prototype.initializeBuffers = function () {
    try {
        this.lastOutput = new Float32Array(this.channels);
        this.outputBuffer = new Float32Array(this.outputBufferSize);
    }
    catch (error) {
        // No (working) typed arrays: grow-on-demand plain arrays instead.
        this.outputBuffer = [];
        this.lastOutput = [];
    }
};
--------------------------------------------------------------------------------
/tests/core/bitstream.coffee:
--------------------------------------------------------------------------------
# Unit tests for AV.Bitstream: positioning (advance/rewind/seek/align)
# and bit-level reads in both MSB-first (read/peek) and LSB-first
# (readLSB/peekLSB) order, signed and unsigned, up to 40 bits.
module 'core/bitstream', ->
    # Builds a Bitstream over a single in-memory buffer of byte values.
    # Takes exactly one array; any extra arguments are ignored.
    makeBitstream = (bytes) ->
        bytes = new Uint8Array(bytes)
        stream = AV.Stream.fromBuffer(new AV.Buffer(bytes))
        return new AV.Bitstream(stream)

    test 'copy', ->
        # NOTE: the second array here is ignored — makeBitstream only
        # accepts one byte array.
        bitstream = makeBitstream [10, 160], [20, 29, 119]
        copy = bitstream.copy()

        assert.notEqual copy, bitstream
        assert.deepEqual copy, bitstream

    test 'advance', ->
        bitstream = makeBitstream [10, 160]

        assert.equal 0, bitstream.bitPosition
        assert.equal 0, bitstream.offset()

        bitstream.advance(2)
        assert.equal 2, bitstream.bitPosition
        assert.equal 2, bitstream.offset()

        # crossing a byte boundary: bitPosition wraps, offset keeps counting
        bitstream.advance(7)
        assert.equal 1, bitstream.bitPosition
        assert.equal 9, bitstream.offset()

        assert.throws ->
            bitstream.advance(40)
        , AV.UnderflowError

    test 'rewind', ->
        bitstream = makeBitstream [10, 160]

        assert.equal 0, bitstream.bitPosition
        assert.equal 0, bitstream.offset()

        bitstream.advance(2)
        assert.equal 2, bitstream.bitPosition
        assert.equal 2, bitstream.offset()

        bitstream.rewind(2)
        assert.equal 0, bitstream.bitPosition
        assert.equal 0, bitstream.offset()

        bitstream.advance(10)
        assert.equal 2, bitstream.bitPosition
        assert.equal 10, bitstream.offset()

        # rewinding back across a byte boundary
        bitstream.rewind(4)
        assert.equal 6, bitstream.bitPosition
        assert.equal 6, bitstream.offset()

        assert.throws ->
            bitstream.rewind(10)
        , AV.UnderflowError

    test 'seek', ->
        bitstream = makeBitstream [10, 160]

        assert.equal 0, bitstream.bitPosition
        assert.equal 0, bitstream.offset()

        bitstream.seek(3)
        assert.equal 3, bitstream.bitPosition
        assert.equal 3, bitstream.offset()

        bitstream.seek(10)
        assert.equal 2, bitstream.bitPosition
        assert.equal 10, bitstream.offset()

        # seeking backwards is allowed
        bitstream.seek(4)
        assert.equal 4, bitstream.bitPosition
        assert.equal 4, bitstream.offset()

        assert.throws ->
            bitstream.seek(100)
        , AV.UnderflowError

        assert.throws ->
            bitstream.seek(-10)
        , AV.UnderflowError

    test 'align', ->
        bitstream = makeBitstream [10, 160]

        assert.equal 0, bitstream.bitPosition
        assert.equal 0, bitstream.offset()

        # already aligned: no movement
        bitstream.align()
        assert.equal 0, bitstream.bitPosition
        assert.equal 0, bitstream.offset()

        # mid-byte: align advances to the next byte boundary
        bitstream.seek(2)
        bitstream.align()
        assert.equal 0, bitstream.bitPosition
        assert.equal 8, bitstream.offset()

    test 'read/peek unsigned', ->
        # 0101 1101 0110 1111 1010 1110 1100 1000 -> 0x5d6faec8
        # 0111 0000 1001 1010 0010 0101 1111 0011 -> 0x709a25f3
        bitstream = makeBitstream [0x5d, 0x6f, 0xae, 0xc8, 0x70, 0x9a, 0x25, 0xf3]

        assert.equal 1, bitstream.peek(2)
        assert.equal 1, bitstream.read(2)

        assert.equal 7, bitstream.peek(4)
        assert.equal 7, bitstream.read(4)

        assert.equal 0x16f, bitstream.peek(10)
        assert.equal 0x16f, bitstream.read(10)

        assert.equal 0xaec8, bitstream.peek(16)
        assert.equal 0xaec8, bitstream.read(16)

        assert.equal 0x709a25f3, bitstream.peek(32)
        assert.equal 0x384d12f9, bitstream.peek(31)
        assert.equal 0x384d12f9, bitstream.read(31)

        assert.equal 1, bitstream.peek(1)
        assert.equal 1, bitstream.read(1)

        # full 40-bit reads (beyond 32-bit integer range)
        bitstream = makeBitstream [0x5d, 0x6f, 0xae, 0xc8, 0x70]
        assert.equal 0x5d6faec870, bitstream.peek(40)
        assert.equal 0x5d6faec870, bitstream.read(40)

        bitstream = makeBitstream [0x5d, 0x6f, 0xae, 0xc8, 0x70]
        assert.equal 1, bitstream.read(2)
        assert.equal 0xeb7d7643, bitstream.peek(33)
        assert.equal 0xeb7d7643, bitstream.read(33)

        # all-ones patterns at every supported width
        bitstream = makeBitstream [0xff, 0xff, 0xff, 0xff, 0xff]
        assert.equal 0xf, bitstream.peek(4)
        assert.equal 0xff, bitstream.peek(8)
        assert.equal 0xfff, bitstream.peek(12)
        assert.equal 0xffff, bitstream.peek(16)
        assert.equal 0xfffff, bitstream.peek(20)
        assert.equal 0xffffff, bitstream.peek(24)
        assert.equal 0xfffffff, bitstream.peek(28)
        assert.equal 0xffffffff, bitstream.peek(32)
        assert.equal 0xfffffffff, bitstream.peek(36)
        assert.equal 0xffffffffff, bitstream.peek(40)

    test 'read/peek signed', ->
        bitstream = makeBitstream [0x5d, 0x6f, 0xae, 0xc8, 0x70, 0x9a, 0x25, 0xf3]

        assert.equal 5, bitstream.peek(4, true)
        assert.equal 5, bitstream.read(4, true)

        assert.equal -3, bitstream.peek(4, true)
        assert.equal -3, bitstream.read(4, true)

        assert.equal 6, bitstream.peek(4, true)
        assert.equal 6, bitstream.read(4, true)

        assert.equal -1, bitstream.peek(4, true)
        assert.equal -1, bitstream.read(4, true)

        assert.equal -82, bitstream.peek(8, true)
        assert.equal -82, bitstream.read(8, true)

        assert.equal -889, bitstream.peek(12, true)
        assert.equal -889, bitstream.read(12, true)

        assert.equal 9, bitstream.peek(8, true)
        assert.equal 9, bitstream.read(8, true)

        assert.equal -191751, bitstream.peek(19, true)
        assert.equal -191751, bitstream.read(19, true)

        assert.equal -1, bitstream.peek(1, true)
        assert.equal -1, bitstream.read(1, true)

        # signed value wider than 32 bits, at an unaligned bit position
        bitstream = makeBitstream [0x5d, 0x6f, 0xae, 0xc8, 0x70, 0x9a, 0x25, 0xf3]
        bitstream.advance(1)

        assert.equal -9278133113, bitstream.peek(35, true)
        assert.equal -9278133113, bitstream.read(35, true)

        # all-ones reads as -1 at every supported width
        bitstream = makeBitstream [0xff, 0xff, 0xff, 0xff, 0xff]
        assert.equal -1, bitstream.peek(4, true)
        assert.equal -1, bitstream.peek(8, true)
        assert.equal -1, bitstream.peek(12, true)
        assert.equal -1, bitstream.peek(16, true)
        assert.equal -1, bitstream.peek(20, true)
        assert.equal -1, bitstream.peek(24, true)
        assert.equal -1, bitstream.peek(28, true)
        assert.equal -1, bitstream.peek(31, true)
        assert.equal -1, bitstream.peek(32, true)
        assert.equal -1, bitstream.peek(36, true)
        assert.equal -1, bitstream.peek(40, true)

    test 'readLSB unsigned', ->
        # { byte 1 }{ byte 2 }
        # { 3 2 1 }{ 3 }
        # { 1][111] [1100] }{ [0000 1000 } -> 0xfc08
        bitstream = makeBitstream [0xfc, 0x08]

        assert.equal 12, bitstream.peekLSB(4)
        assert.equal 12, bitstream.readLSB(4)

        assert.equal 7, bitstream.peekLSB(3)
        assert.equal 7, bitstream.readLSB(3)

        assert.equal 0x11, bitstream.peekLSB(9)
        assert.equal 0x11, bitstream.readLSB(9)

        # 4 3 2 1
        # [0111 0000] [1001 1010] [0010 0101] 1[111 0011] -> 0x709a25f3
        bitstream = makeBitstream [0x70, 0x9a, 0x25, 0xf3]
        assert.equal 0xf3259a70, bitstream.peekLSB(32)
        assert.equal 0x73259a70, bitstream.peekLSB(31)
        assert.equal 0x73259a70, bitstream.readLSB(31)

        assert.equal 1, bitstream.peekLSB(1)
        assert.equal 1, bitstream.readLSB(1)

        # 40-bit LSB-first reads (beyond 32-bit integer range)
        bitstream = makeBitstream [0xc8, 0x70, 0x9a, 0x25, 0xf3]
        assert.equal 0xf3259a70c8, bitstream.peekLSB(40)
        assert.equal 0xf3259a70c8, bitstream.readLSB(40)

        bitstream = makeBitstream [0x70, 0x9a, 0x25, 0xff, 0xf3]
        assert.equal 0xf3ff259a70, bitstream.peekLSB(40)
        assert.equal 0xf3ff259a70, bitstream.readLSB(40)

        bitstream = makeBitstream [0xff, 0xff, 0xff, 0xff, 0xff]
        assert.equal 0xf, bitstream.peekLSB(4)
        assert.equal 0xff, bitstream.peekLSB(8)
        assert.equal 0xfff, bitstream.peekLSB(12)
        assert.equal 0xffff, bitstream.peekLSB(16)
        assert.equal 0xfffff, bitstream.peekLSB(20)
        assert.equal 0xffffff, bitstream.peekLSB(24)
        assert.equal 0xfffffff, bitstream.peekLSB(28)
        assert.equal 0xffffffff, bitstream.peekLSB(32)
        assert.equal 0xfffffffff, bitstream.peekLSB(36)
        assert.equal 0xffffffffff, bitstream.peekLSB(40)

    test 'readLSB signed', ->
        bitstream = makeBitstream [0xfc, 0x08]
        assert.equal -4, bitstream.peekLSB(4, true)
        assert.equal -4, bitstream.readLSB(4, true)

        assert.equal -1, bitstream.peekLSB(3, true)
        assert.equal -1, bitstream.readLSB(3, true)

        assert.equal 0x11, bitstream.peekLSB(9, true)
        assert.equal 0x11, bitstream.readLSB(9, true)

        bitstream = makeBitstream [0x70, 0x9a, 0x25, 0xf3]
        assert.equal -215639440, bitstream.peekLSB(32, true)
        assert.equal -215639440, bitstream.peekLSB(31, true)
        assert.equal -215639440, bitstream.readLSB(31, true)

        assert.equal -1, bitstream.peekLSB(1, true)
        assert.equal -1, bitstream.readLSB(1, true)

        # negative 40-bit values
        bitstream = makeBitstream [0xc8, 0x70, 0x9a, 0x25, 0xf3]
        assert.equal -55203696440, bitstream.peekLSB(40, true)
        assert.equal -55203696440, bitstream.readLSB(40, true)

        bitstream = makeBitstream [0x70, 0x9a, 0x25, 0xff, 0xf3]
        assert.equal -51553920400, bitstream.peekLSB(40, true)
        assert.equal -51553920400, bitstream.readLSB(40, true)

        bitstream = makeBitstream [0xff, 0xff, 0xff, 0xff, 0xff]
        assert.equal -1, bitstream.peekLSB(4, true)
        assert.equal -1, bitstream.peekLSB(8, true)
        assert.equal -1, bitstream.peekLSB(12, true)
        assert.equal -1, bitstream.peekLSB(16, true)
        assert.equal -1, bitstream.peekLSB(20, true)
        assert.equal -1, bitstream.peekLSB(24, true)
        assert.equal -1, bitstream.peekLSB(28, true)
        assert.equal -1, bitstream.peekLSB(31, true)
        assert.equal -1, bitstream.peekLSB(32, true)
        assert.equal -1, bitstream.peekLSB(36, true)
        assert.equal -1, bitstream.peekLSB(40, true)
--------------------------------------------------------------------------------
/src/core/stream.coffee:
--------------------------------------------------------------------------------
class AV.Stream
    # Shared 16-byte scratch buffer: bytes are copied in through `uint8`
    # and read back out through whichever typed-array view matches the
    # requested type.  All views alias the same memory.
    buf = new ArrayBuffer(16)
    uint8 = new Uint8Array(buf)
    int8 = new Int8Array(buf)
    uint16 = new Uint16Array(buf)
    int16 = new Int16Array(buf)
    uint32 = new Uint32Array(buf)
    int32 = new Int32Array(buf)
    float32 = new Float32Array(buf)
    # Float64Array may be missing on old engines; a manual decoder below
    # covers that case.
    float64 = new Float64Array(buf) if Float64Array?

    # detect the native endianness of the machine
    # 0x3412 is little endian, 0x1234 is big endian
    nativeEndian = new Uint16Array(new Uint8Array([0x12, 0x34]).buffer)[0] is 0x3412

    # define an error class to be thrown if an underflow occurs
    # NOTE(review): this constructor does not call super, so `message`
    # and `stack` are not populated on instances — confirm callers only
    # rely on `name` / instanceof checks.
    class AV.UnderflowError extends Error
        constructor: ->
            @name = 'AV.UnderflowError'

    # Wraps an AV.BufferList.  `offset` is the absolute position within
    # the whole list; `localOffset` is the position within the list's
    # current first buffer.
    constructor: (@list) ->
        @localOffset = 0
        @offset = 0
24 |
25 | @fromBuffer: (buffer) ->
26 | list = new AV.BufferList
27 | list.append(buffer)
28 | return new AV.Stream(list)
29 |
30 | copy: ->
31 | result = new AV.Stream(@list.copy())
32 | result.localOffset = @localOffset
33 | result.offset = @offset
34 | return result
35 |
36 | available: (bytes) ->
37 | return bytes <= @list.availableBytes - @localOffset
38 |
39 | remainingBytes: ->
40 | return @list.availableBytes - @localOffset
41 |
    # Consumes `bytes` bytes, releasing exhausted buffers from the front
    # of the list as the position passes them.
    # Throws AV.UnderflowError when fewer than `bytes` bytes remain.
    advance: (bytes) ->
        if not @available bytes
            throw new AV.UnderflowError()

        @localOffset += bytes
        @offset += bytes

        # drop leading buffers the new position has moved past
        while @list.first and @localOffset >= @list.first.length
            @localOffset -= @list.first.length
            @list.advance()

        return this

    # Moves the position `bytes` bytes backwards, re-attaching earlier
    # buffers to the front of the list as needed.
    # Throws AV.UnderflowError when rewinding past the start.
    rewind: (bytes) ->
        if bytes > @offset
            throw new AV.UnderflowError()

        # if we're at the end of the bufferlist, seek from the end
        if not @list.first
            @list.rewind()
            @localOffset = @list.first.length

        @localOffset -= bytes
        @offset -= bytes

        # walk back through earlier buffers until the local offset lands
        # inside the current first buffer again
        while @list.first.prev and @localOffset < 0
            @list.rewind()
            @localOffset += @list.first.length

        return this
72 |
73 | seek: (position) ->
74 | if position > @offset
75 | @advance position - @offset
76 |
77 | else if position < @offset
78 | @rewind @offset - position
79 |
    # Reads one byte and advances, releasing the current buffer once its
    # end is reached.  Throws AV.UnderflowError when empty.
    readUInt8: ->
        if not @available(1)
            throw new AV.UnderflowError()

        a = @list.first.data[@localOffset]
        @localOffset += 1
        @offset += 1

        if @localOffset == @list.first.length
            @localOffset = 0
            @list.advance()

        return a

    # Returns the byte `offset` bytes ahead of the current position
    # without advancing, walking across buffer boundaries as needed.
    # Throws AV.UnderflowError when the byte is not available.
    peekUInt8: (offset = 0) ->
        if not @available(offset + 1)
            throw new AV.UnderflowError()

        offset = @localOffset + offset
        buffer = @list.first

        while buffer
            if buffer.length > offset
                return buffer.data[offset]

            offset -= buffer.length
            buffer = buffer.next

        # unreachable in practice: available() guaranteed the byte exists
        return 0

    # Copies `bytes` bytes into the shared scratch buffer (`uint8`),
    # reversing byte order when the requested endianness differs from the
    # machine's, so the sibling typed-array views read back a native
    # value.  Returns nothing; callers read the appropriate view.
    read: (bytes, littleEndian = false) ->
        if littleEndian is nativeEndian
            for i in [0...bytes] by 1
                uint8[i] = @readUInt8()
        else
            for i in [bytes - 1..0] by -1
                uint8[i] = @readUInt8()

        return

    # Same as `read` but peeks `offset` bytes ahead instead of consuming
    # input.  Note: `offset` has no default here — callers must pass it.
    peek: (bytes, offset, littleEndian = false) ->
        if littleEndian is nativeEndian
            for i in [0...bytes] by 1
                uint8[i] = @peekUInt8(offset + i)
        else
            for i in [0...bytes] by 1
                uint8[bytes - i - 1] = @peekUInt8(offset + i)

        return
129 |
    # 8- and 16-bit accessors: fill the scratch buffer via read/peek and
    # reinterpret it through the matching typed-array view.

    readInt8: ->
        @read(1)
        return int8[0]

    peekInt8: (offset = 0) ->
        @peek(1, offset)
        return int8[0]

    readUInt16: (littleEndian) ->
        @read(2, littleEndian)
        return uint16[0]

    peekUInt16: (offset = 0, littleEndian) ->
        @peek(2, offset, littleEndian)
        return uint16[0]

    readInt16: (littleEndian) ->
        @read(2, littleEndian)
        return int16[0]

    peekInt16: (offset = 0, littleEndian) ->
        @peek(2, offset, littleEndian)
        return int16[0]

    # 24-bit values have no typed-array view, so they are assembled from
    # a 16-bit and an 8-bit access.  The signed variants use the signed
    # accessor for whichever part holds the sign bit.

    readUInt24: (littleEndian) ->
        if littleEndian
            return @readUInt16(true) + (@readUInt8() << 16)
        else
            return (@readUInt16() << 8) + @readUInt8()

    peekUInt24: (offset = 0, littleEndian) ->
        if littleEndian
            return @peekUInt16(offset, true) + (@peekUInt8(offset + 2) << 16)
        else
            return (@peekUInt16(offset) << 8) + @peekUInt8(offset + 2)

    readInt24: (littleEndian) ->
        if littleEndian
            return @readUInt16(true) + (@readInt8() << 16)
        else
            return (@readInt16() << 8) + @readUInt8()

    peekInt24: (offset = 0, littleEndian) ->
        if littleEndian
            return @peekUInt16(offset, true) + (@peekInt8(offset + 2) << 16)
        else
            return (@peekInt16(offset) << 8) + @peekUInt8(offset + 2)
177 |
    # 32-bit integer and single-precision float accessors, via the shared
    # scratch buffer and its typed-array views.

    readUInt32: (littleEndian) ->
        @read(4, littleEndian)
        return uint32[0]

    peekUInt32: (offset = 0, littleEndian) ->
        @peek(4, offset, littleEndian)
        return uint32[0]

    readInt32: (littleEndian) ->
        @read(4, littleEndian)
        return int32[0]

    peekInt32: (offset = 0, littleEndian) ->
        @peek(4, offset, littleEndian)
        return int32[0]

    readFloat32: (littleEndian) ->
        @read(4, littleEndian)
        return float32[0]

    peekFloat32: (offset = 0, littleEndian) ->
        @peek(4, offset, littleEndian)
        return float32[0]
201 |
    # Reads a 64-bit IEEE-754 double.  Falls back to manual decoding on
    # engines without Float64Array.
    readFloat64: (littleEndian) ->
        @read(8, littleEndian)

        # use Float64Array if available
        if float64
            return float64[0]
        else
            return float64Fallback()

    # Manual IEEE-754 double decoder over the two 32-bit scratch words.
    # NOTE(review): the [low, high] word order assumes a little-endian
    # host — confirm for big-endian targets.
    # NOTE(review): a value with an all-zero high word (including some
    # subnormals) decodes to 0.0, and subnormals with a nonzero high word
    # still receive the implicit leading 1 — the fallback is only
    # approximate for subnormals.
    float64Fallback = ->
        [low, high] = uint32
        return 0.0 if not high or high is 0x80000000

        sign = 1 - (high >>> 31) * 2 # +1 or -1
        exp = (high >>> 20) & 0x7ff
        frac = high & 0xfffff

        # NaN or Infinity
        if exp is 0x7ff
            return NaN if frac
            return sign * Infinity

        # remove the exponent bias and add the implicit leading 1
        exp -= 1023
        out = (frac | 0x100000) * Math.pow(2, exp - 20)
        out += low * Math.pow(2, exp - 52)

        return sign * out

    # Peek variant of readFloat64: does not consume input.
    peekFloat64: (offset = 0, littleEndian) ->
        @peek(8, offset, littleEndian)

        # use Float64Array if available
        if float64
            return float64[0]
        else
            return float64Fallback()
238 |
  # IEEE 80 bit extended float
  # (sign bit + 15-bit exponent + 64-bit mantissa; used e.g. by AIFF for
  # the sample rate field)
  readFloat80: (littleEndian) ->
    @read(10, littleEndian)
    return float80()

  # decode the 10 bytes currently in the shared scratch buffer as an 80-bit
  # extended float.
  # NOTE(review): assumes uint32[0]/uint32[1] hold the mantissa words and
  # uint8[8..9] the exponent/sign bytes after @read — confirm against the
  # shared buffer layout at the top of this file.
  float80 = ->
    [high, low] = uint32
    a0 = uint8[9] # sign bit + high 7 exponent bits
    a1 = uint8[8] # low 8 exponent bits

    sign = 1 - (a0 >>> 7) * 2 # -1 or +1
    exp = ((a0 & 0x7F) << 8) | a1

    # all-zero bit pattern is zero
    if exp is 0 and low is 0 and high is 0
      return 0

    # maximum exponent encodes Infinity (zero mantissa) or NaN
    if exp is 0x7fff
      if low is 0 and high is 0
        return sign * Infinity

      return NaN

    exp -= 16383
    out = low * Math.pow(2, exp - 31)
    out += high * Math.pow(2, exp - 63)

    return sign * out

  # same as readFloat80, but without advancing the stream
  peekFloat80: (offset = 0, littleEndian) ->
    @peek(10, offset, littleEndian)
    return float80()
270 |
  # read `length` bytes into a newly allocated AV.Buffer, advancing the stream
  readBuffer: (length) ->
    result = AV.Buffer.allocate(length)
    to = result.data

    for i in [0...length] by 1
      to[i] = @readUInt8()

    return result

  # same as readBuffer, but without advancing the stream
  peekBuffer: (offset = 0, length) ->
    result = AV.Buffer.allocate(length)
    to = result.data

    for i in [0...length] by 1
      to[i] = @peekUInt8(offset + i)

    return result

  # return a slice of the first buffer in the underlying list (no copy);
  # may yield fewer than `length` bytes if that buffer is shorter, and the
  # stream advances only by what was actually returned
  readSingleBuffer: (length) ->
    result = @list.first.slice(@localOffset, length)
    @advance(result.length)
    return result

  # non-advancing variant of readSingleBuffer
  peekSingleBuffer: (offset, length) ->
    result = @list.first.slice(@localOffset + offset, length)
    return result
297 |
  # read a string of `length` bytes in the given encoding, advancing the
  # stream past it; pass a null length to read up to a NUL terminator
  readString: (length, encoding = 'ascii') ->
    return decodeString.call this, 0, length, encoding, true

  # non-advancing variant of readString
  peekString: (offset = 0, length, encoding = 'ascii') ->
    return decodeString.call this, offset, length, encoding, false

  # shared implementation for readString/peekString: decodes `length` bytes
  # (or until NUL when length is null) starting at `offset`, advancing the
  # stream only when `advance` is true
  decodeString = (offset, length, encoding, advance) ->
    encoding = encoding.toLowerCase()
    # when length is null we stop at a 0 byte; otherwise nullEnd is -1,
    # which never matches an unsigned byte/word, so exactly `length` bytes
    # are consumed
    nullEnd = if length is null then 0 else -1

    length = Infinity if not length?
    end = offset + length
    result = ''

    switch encoding
      when 'ascii', 'latin1'
        while offset < end and (c = @peekUInt8(offset++)) isnt nullEnd
          result += String.fromCharCode(c)

      when 'utf8', 'utf-8'
        while offset < end and (b1 = @peekUInt8(offset++)) isnt nullEnd
          # single byte (0 to 127)
          if (b1 & 0x80) is 0
            result += String.fromCharCode b1

          # one continuation (128 to 2047)
          else if (b1 & 0xe0) is 0xc0
            b2 = @peekUInt8(offset++) & 0x3f
            result += String.fromCharCode ((b1 & 0x1f) << 6) | b2

          # two continuation (2048 to 55295 and 57344 to 65535)
          else if (b1 & 0xf0) is 0xe0
            b2 = @peekUInt8(offset++) & 0x3f
            b3 = @peekUInt8(offset++) & 0x3f
            result += String.fromCharCode ((b1 & 0x0f) << 12) | (b2 << 6) | b3

          # three continuation (65536 to 1114111)
          else if (b1 & 0xf8) is 0xf0
            b2 = @peekUInt8(offset++) & 0x3f
            b3 = @peekUInt8(offset++) & 0x3f
            b4 = @peekUInt8(offset++) & 0x3f

            # split into a surrogate pair
            pt = (((b1 & 0x0f) << 18) | (b2 << 12) | (b3 << 6) | b4) - 0x10000
            result += String.fromCharCode 0xd800 + (pt >> 10), 0xdc00 + (pt & 0x3ff)

      when 'utf16-be', 'utf16be', 'utf16le', 'utf16-le', 'utf16bom', 'utf16-bom'
        # find endianness
        switch encoding
          when 'utf16be', 'utf16-be'
            littleEndian = false

          when 'utf16le', 'utf16-le'
            littleEndian = true

          when 'utf16bom', 'utf16-bom'
            # missing BOM (too short, or NUL right away): empty string
            if length < 2 or (bom = @peekUInt16(offset)) is nullEnd
              @advance offset += 2 if advance
              return result

            littleEndian = (bom is 0xfffe)
            offset += 2

        while offset < end and (w1 = @peekUInt16(offset, littleEndian)) isnt nullEnd
          offset += 2

          if w1 < 0xd800 or w1 > 0xdfff
            result += String.fromCharCode(w1)

          else
            # w1 must be a high surrogate, followed by a low surrogate
            if w1 > 0xdbff
              throw new Error "Invalid utf16 sequence."

            w2 = @peekUInt16(offset, littleEndian)
            if w2 < 0xdc00 or w2 > 0xdfff
              throw new Error "Invalid utf16 sequence."

            result += String.fromCharCode(w1, w2)
            offset += 2

        # count the NUL terminator in the advanced length
        if w1 is nullEnd
          offset += 2

      else
        throw new Error "Unknown encoding: #{encoding}"

    @advance offset if advance
    return result
--------------------------------------------------------------------------------
/src/demuxers/m4a.coffee:
--------------------------------------------------------------------------------
class M4ADemuxer extends AV.Demuxer
  AV.Demuxer.register(M4ADemuxer)

  # common file type identifiers
  # see http://mp4ra.org/filetype.html for a complete list
  # NOTE: every brand is exactly 4 characters — the QuickTime brand is
  # 'qt' padded with two spaces — because probe compares them against a
  # 4-byte peekString
  TYPES = ['M4A ', 'M4P ', 'M4B ', 'M4V ', 'isom', 'mp42', 'qt  ']

  # sniff the start of the file: an 'ftyp' atom whose major brand is a
  # known MP4/QuickTime type
  @probe: (buffer) ->
    return buffer.peekString(4, 4) is 'ftyp' and
      buffer.peekString(8, 4) in TYPES
11 |
  # set up per-file parsing state
  init: ->
    # current atom hierarchy stacks (atom names and their end offsets)
    @atoms = []
    @offsets = []

    # m4a files can have multiple tracks
    @track = null
    @tracks = []

  # lookup table for atom handlers, keyed by dot-joined atom path
  atoms = {}

  # lookup table of container atom names
  containers = {}

  # declare a function to be used for parsing a given atom name.
  # `name` is a dot-separated path (e.g. 'moov.trak.tkhd'); every proper
  # prefix of the path is registered as a container so readChunk descends
  # into it
  atom = (name, fn) ->
    c = []
    for container in name.split('.').slice(0, -1)
      c.push container
      containers[c.join('.')] = true

    atoms[name] ?= {}
    atoms[name].fn = fn

  # declare a function to be called after parsing of an atom and all sub-atoms has completed
  after = (name, fn) ->
    atoms[name] ?= {}
    atoms[name].after = fn
41 |
  # incrementally parse atoms as data arrives. Returns early (without
  # consuming the header/body) whenever the stream does not yet have enough
  # bytes; handlers set @break to pause between emitted chunks of data.
  readChunk: ->
    @break = false

    while @stream.available(1) and not @break
      # if we're ready to read a new atom, add it to the stack
      if not @readHeaders
        return unless @stream.available(8)

        # atom header: 32-bit size (including the 8 header bytes) + 4CC type
        @len = @stream.readUInt32() - 8
        @type = @stream.readString(4)

        continue if @len is 0

        @atoms.push @type
        @offsets.push @stream.offset + @len
        @readHeaders = true

      # find a handler for the current atom hierarchy
      path = @atoms.join '.'
      handler = atoms[path]

      if handler?.fn
        # wait until we have enough data, unless this is the mdat atom
        return unless @stream.available(@len) or path is 'mdat'

        # call the parser for the atom type
        handler.fn.call(this)

        # check if this atom can contain sub-atoms
        if path of containers
          @readHeaders = false

      # handle container atoms (no handler of their own: descend into them)
      else if path of containers
        @readHeaders = false

      # unknown atom
      else
        # wait until we have enough data
        return unless @stream.available(@len)
        @stream.advance(@len)

      # pop completed items from the stack
      while @stream.offset >= @offsets[@offsets.length - 1]
        # call after handler
        handler = atoms[@atoms.join '.']
        if handler?.after
          handler.after.call(this)

        type = @atoms.pop()
        @offsets.pop()
        @readHeaders = false
94 |
  # file type atom: verify the major brand, skip minor version/compatible brands
  atom 'ftyp', ->
    if @stream.readString(4) not in TYPES
      return @emit 'error', 'Not a valid M4A file.'

    @stream.advance(@len - 4)

  # a new track: start collecting its state
  atom 'moov.trak', ->
    @track = {}
    @tracks.push @track

  # track header: we only care about the track id
  atom 'moov.trak.tkhd', ->
    @stream.advance(4) # version and flags

    @stream.advance(8) # creation and modification time
    @track.id = @stream.readUInt32()

    @stream.advance(@len - 16) # rest of the header

  # handler reference: tells us the track kind (e.g. 'soun' for audio)
  atom 'moov.trak.mdia.hdlr', ->
    @stream.advance(4) # version and flags

    @stream.advance(4) # component type
    @track.type = @stream.readString(4)

    @stream.advance(12) # component manufacturer, flags, and mask
    @stream.advance(@len - 24) # component name

  # media header: time scale and duration (in timeScale units)
  atom 'moov.trak.mdia.mdhd', ->
    @stream.advance(4) # version and flags
    @stream.advance(8) # creation and modification dates

    @track.timeScale = @stream.readUInt32()
    @track.duration = @stream.readUInt32()

    @stream.advance(4) # language and quality

  # corrections to bits per channel, based on formatID
  # (ffmpeg appears to always encode the bitsPerChannel as 16)
  BITS_PER_CHANNEL =
    ulaw: 8
    alaw: 8
    in24: 24
    in32: 32
    fl32: 32
    fl64: 64
140 |
  # sample description atom: decodes the audio format information
  atom 'moov.trak.mdia.minf.stbl.stsd', ->
    @stream.advance(4) # version and flags

    numEntries = @stream.readUInt32()

    # just ignore the rest of the atom if this isn't an audio track
    if @track.type isnt 'soun'
      return @stream.advance(@len - 8)

    if numEntries isnt 1
      return @emit 'error', "Only expecting one entry in sample description atom!"

    @stream.advance(4) # size

    format = @track.format = {}
    format.formatID = @stream.readString(4)

    @stream.advance(6) # reserved
    @stream.advance(2) # data reference index

    version = @stream.readUInt16()
    @stream.advance(6) # skip revision level and vendor

    format.channelsPerFrame = @stream.readUInt16()
    format.bitsPerChannel = @stream.readUInt16()

    @stream.advance(4) # skip compression id and packet size

    format.sampleRate = @stream.readUInt16()
    @stream.advance(2) # fractional part of the fixed-point sample rate

    if version is 1
      format.framesPerPacket = @stream.readUInt32()
      @stream.advance(4) # bytes per packet
      format.bytesPerFrame = @stream.readUInt32()
      @stream.advance(4) # bytes per sample

    else if version isnt 0
      @emit 'error', 'Unknown version in stsd atom'

    # override the (often wrong) encoded bits per channel for known formats
    if BITS_PER_CHANNEL[format.formatID]?
      format.bitsPerChannel = BITS_PER_CHANNEL[format.formatID]

    format.floatingPoint = format.formatID in ['fl32', 'fl64']
    format.littleEndian = format.formatID is 'sowt' and format.bitsPerChannel > 8

    # every uncompressed PCM variant is decoded by the generic LPCM decoder
    if format.formatID in ['twos', 'sowt', 'in24', 'in32', 'fl32', 'fl64', 'raw ', 'NONE']
      format.formatID = 'lpcm'

  # ALAC magic cookie: opaque codec configuration passed to the decoder
  atom 'moov.trak.mdia.minf.stbl.stsd.alac', ->
    @stream.advance(4)
    @track.cookie = @stream.readBuffer(@len - 4)

  # AAC elementary stream descriptor: extract the decoder-specific config
  atom 'moov.trak.mdia.minf.stbl.stsd.esds', ->
    offset = @stream.offset + @len
    @track.cookie = M4ADemuxer.readEsds @stream
    @stream.seek offset # skip garbage at the end

  # endianness flag for LPCM audio
  atom 'moov.trak.mdia.minf.stbl.stsd.wave.enda', ->
    @track.format.littleEndian = !!@stream.readUInt16()
201 |
202 | # reads a variable length integer
203 | @readDescrLen: (stream) ->
204 | len = 0
205 | count = 4
206 |
207 | while count--
208 | c = stream.readUInt8()
209 | len = (len << 7) | (c & 0x7f)
210 | break unless c & 0x80
211 |
212 | return len
213 |
  # parse an MPEG-4 elementary stream descriptor (esds atom payload) and
  # return the decoder-specific configuration as a buffer, or null if none
  # was found
  @readEsds: (stream) ->
    stream.advance(4) # version and flags

    tag = stream.readUInt8()
    len = M4ADemuxer.readDescrLen(stream)

    if tag is 0x03 # MP4ESDescrTag
      stream.advance(2) # id
      flags = stream.readUInt8()

      if flags & 0x80 # streamDependenceFlag
        stream.advance(2)

      if flags & 0x40 # URL_Flag
        stream.advance stream.readUInt8()

      if flags & 0x20 # OCRstreamFlag
        stream.advance(2)

    else
      stream.advance(2) # id

    tag = stream.readUInt8()
    len = M4ADemuxer.readDescrLen(stream)

    if tag is 0x04 # MP4DecConfigDescrTag
      codec_id = stream.readUInt8() # might want this... (isom.c:35)
      stream.advance(1) # stream type
      stream.advance(3) # buffer size
      stream.advance(4) # max bitrate
      stream.advance(4) # avg bitrate

      tag = stream.readUInt8()
      len = M4ADemuxer.readDescrLen(stream)

      if tag is 0x05 # MP4DecSpecificDescrTag
        return stream.readBuffer(len)

    return null
253 |
  # time to sample
  # (each of the four sample-table handlers below calls setupSeekPoints,
  # which is a no-op until all four tables have been parsed)
  atom 'moov.trak.mdia.minf.stbl.stts', ->
    @stream.advance(4) # version and flags

    entries = @stream.readUInt32()
    @track.stts = []
    for i in [0...entries] by 1
      @track.stts[i] =
        count: @stream.readUInt32()
        duration: @stream.readUInt32()

    @setupSeekPoints()

  # sample to chunk
  atom 'moov.trak.mdia.minf.stbl.stsc', ->
    @stream.advance(4) # version and flags

    entries = @stream.readUInt32()
    @track.stsc = []
    for i in [0...entries] by 1
      @track.stsc[i] =
        first: @stream.readUInt32()
        count: @stream.readUInt32()
        id: @stream.readUInt32()

    @setupSeekPoints()

  # sample size (sampleSize is 0 when sizes vary per sample)
  atom 'moov.trak.mdia.minf.stbl.stsz', ->
    @stream.advance(4) # version and flags

    @track.sampleSize = @stream.readUInt32()
    entries = @stream.readUInt32()

    if @track.sampleSize is 0 and entries > 0
      @track.sampleSizes = []
      for i in [0...entries] by 1
        @track.sampleSizes[i] = @stream.readUInt32()

    @setupSeekPoints()

  # chunk offsets
  atom 'moov.trak.mdia.minf.stbl.stco', -> # TODO: co64
    @stream.advance(4) # version and flags

    entries = @stream.readUInt32()
    @track.chunkOffsets = []
    for i in [0...entries] by 1
      @track.chunkOffsets[i] = @stream.readUInt32()

    @setupSeekPoints()

  # chapter track reference: list of track ids holding chapter text
  atom 'moov.trak.tref.chap', ->
    entries = @len >> 2
    @track.chapterTracks = []
    for i in [0...entries] by 1
      @track.chapterTracks[i] = @stream.readUInt32()

    return
314 |
315 | # once we have all the information we need, generate the seek table for this track
316 | setupSeekPoints: ->
317 | return unless @track.chunkOffsets? and @track.stsc? and @track.sampleSize? and @track.stts?
318 |
319 | stscIndex = 0
320 | sttsIndex = 0
321 | sttsIndex = 0
322 | sttsSample = 0
323 | sampleIndex = 0
324 |
325 | offset = 0
326 | timestamp = 0
327 | @track.seekPoints = []
328 |
329 | for position, i in @track.chunkOffsets
330 | for j in [0...@track.stsc[stscIndex].count] by 1
331 | # push the timestamp and both the physical position in the file
332 | # and the offset without gaps from the start of the data
333 | @track.seekPoints.push
334 | offset: offset
335 | position: position
336 | timestamp: timestamp
337 |
338 | size = @track.sampleSize or @track.sampleSizes[sampleIndex++]
339 | offset += size
340 | position += size
341 | timestamp += @track.stts[sttsIndex].duration
342 |
343 | if sttsIndex + 1 < @track.stts.length and ++sttsSample is @track.stts[sttsIndex].count
344 | sttsSample = 0
345 | sttsIndex++
346 |
347 | if stscIndex + 1 < @track.stsc.length and i + 1 is @track.stsc[stscIndex + 1].first
348 | stscIndex++
349 |
  # the moov atom is fully parsed: pick the audio track and announce it
  after 'moov', ->
    # if the mdat block was at the beginning rather than the end, jump back to it
    if @mdatOffset?
      @stream.seek @mdatOffset - 8

    # choose a track (the first audio track wins)
    for track in @tracks when track.type is 'soun'
      @track = track
      break

    if @track.type isnt 'soun'
      @track = null
      return @emit 'error', 'No audio tracks in m4a file.'

    # emit info
    @emit 'format', @track.format
    @emit 'duration', @track.duration / @track.timeScale * 1000 | 0 # milliseconds
    if @track.cookie
      @emit 'cookie', @track.cookie

    # use the seek points from the selected track
    @seekPoints = @track.seekPoints
372 |
  # media data atom: walks the chunk/sample tables and emits raw sample
  # data, pausing (@break) whenever the stream runs dry
  atom 'mdat', ->
    if not @startedData
      @mdatOffset ?= @stream.offset

      # if we haven't read the headers yet, the mdat atom was at the beginning
      # rather than the end. Skip over it for now to read the headers first, and
      # come back later.
      if @tracks.length is 0
        bytes = Math.min(@stream.remainingBytes(), @len)
        @stream.advance bytes
        @len -= bytes
        return

      # per-track walk state
      @chunkIndex = 0
      @stscIndex = 0
      @sampleIndex = 0
      @tailOffset = 0
      @tailSamples = 0

      @startedData = true

    # read the chapter information if any
    unless @readChapters
      @readChapters = @parseChapters()
      return if @break = not @readChapters
      @stream.seek @mdatOffset

    # get the starting offset
    offset = @track.chunkOffsets[@chunkIndex] + @tailOffset
    length = 0

    # make sure we have enough data to get to the offset
    unless @stream.available(offset - @stream.offset)
      @break = true
      return

    # seek to the offset
    @stream.seek(offset)

    # calculate the maximum length we can read at once
    while @chunkIndex < @track.chunkOffsets.length
      # calculate the size in bytes of the chunk using the sample size table
      numSamples = @track.stsc[@stscIndex].count - @tailSamples
      chunkSize = 0
      for sample in [0...numSamples] by 1
        size = @track.sampleSize or @track.sampleSizes[@sampleIndex]

        # if we don't have enough data to add this sample, jump out
        break unless @stream.available(length + size)

        length += size
        chunkSize += size
        @sampleIndex++

      # if we didn't make it through the whole chunk, add what we did use to the tail
      if sample < numSamples
        @tailOffset += chunkSize
        @tailSamples += sample
        break
      else
        # otherwise, we can move to the next chunk
        @chunkIndex++
        @tailOffset = 0
        @tailSamples = 0

        # if we've made it to the end of a list of subsequent chunks with the same number of samples,
        # go to the next sample to chunk entry
        if @stscIndex + 1 < @track.stsc.length and @chunkIndex + 1 is @track.stsc[@stscIndex + 1].first
          @stscIndex++

        # if the next chunk isn't right after this one, jump out
        if offset + length isnt @track.chunkOffsets[@chunkIndex]
          break

    # emit some data if we have any, otherwise wait for more
    if length > 0
      @emit 'data', @stream.readBuffer(length)
      @break = @chunkIndex is @track.chunkOffsets.length
    else
      @break = true
453 |
  # read chapter titles/timestamps out of the chapter text track, if any.
  # returns false when more stream data is needed, true once finished
  # (or when there are no chapters at all)
  parseChapters: ->
    return true unless @track.chapterTracks?.length > 0

    # find the chapter track
    id = @track.chapterTracks[0]
    for track in @tracks
      break if track.id is id

    # NOTE(review): if no track matched, `track` is left as the last one
    # iterated and parsing continues with it after emitting the error —
    # confirm whether this should bail out instead
    if track.id isnt id
      @emit 'error', 'Chapter track does not exist.'

    @chapters ?= []

    # use the seek table offsets to find chapter titles
    while @chapters.length < track.seekPoints.length
      point = track.seekPoints[@chapters.length]

      # make sure we have enough data
      return false unless @stream.available(point.position - @stream.offset + 32)

      # jump to the title offset
      @stream.seek point.position

      # read the length of the title string
      len = @stream.readUInt16()
      title = null

      return false unless @stream.available(len)

      # if there is a BOM marker, read a utf16 string
      if len > 2
        bom = @stream.peekUInt16()
        if bom in [0xfeff, 0xfffe]
          title = @stream.readString(len, 'utf16-bom')

      # otherwise, use utf8
      title ?= @stream.readString(len, 'utf8')

      # add the chapter title, timestamp, and duration (both in milliseconds)
      nextTimestamp = track.seekPoints[@chapters.length + 1]?.timestamp ? track.duration
      @chapters.push
        title: title
        timestamp: point.timestamp / track.timeScale * 1000 | 0
        duration: (nextTimestamp - point.timestamp) / track.timeScale * 1000 | 0

    # we're done, so emit the chapter data
    @emit 'chapters', @chapters
    return true
502 |
503 | # metadata chunk
504 | atom 'moov.udta.meta', ->
505 | @metadata = {}
506 | @stream.advance(4) # version and flags
507 |
508 | # emit when we're done
509 | after 'moov.udta.meta', ->
510 | @emit 'metadata', @metadata
511 |
512 | # convienience function to generate metadata atom handler
513 | meta = (field, name, fn) ->
514 | atom "moov.udta.meta.ilst.#{field}.data", ->
515 | @stream.advance(8)
516 | @len -= 8
517 | fn.call this, name
518 |
519 | # string field reader
520 | string = (field) ->
521 | @metadata[field] = @stream.readString(@len, 'utf8')
522 |
523 | # from http://atomicparsley.sourceforge.net/mpeg-4files.html
524 | meta '©alb', 'album', string
525 | meta '©arg', 'arranger', string
526 | meta '©art', 'artist', string
527 | meta '©ART', 'artist', string
528 | meta 'aART', 'albumArtist', string
529 | meta 'catg', 'category', string
530 | meta '©com', 'composer', string
531 | meta '©cpy', 'copyright', string
532 | meta 'cprt', 'copyright', string
533 | meta '©cmt', 'comments', string
534 | meta '©day', 'releaseDate', string
535 | meta 'desc', 'description', string
536 | meta '©gen', 'genre', string # custom genres
537 | meta '©grp', 'grouping', string
538 | meta '©isr', 'ISRC', string
539 | meta 'keyw', 'keywords', string
540 | meta '©lab', 'recordLabel', string
541 | meta 'ldes', 'longDescription', string
542 | meta '©lyr', 'lyrics', string
543 | meta '©nam', 'title', string
544 | meta '©phg', 'recordingCopyright', string
545 | meta '©prd', 'producer', string
546 | meta '©prf', 'performers', string
547 | meta 'purd', 'purchaseDate', string
548 | meta 'purl', 'podcastURL', string
549 | meta '©swf', 'songwriter', string
550 | meta '©too', 'encoder', string
551 | meta '©wrt', 'composer', string
552 |
553 | meta 'covr', 'coverArt', (field) ->
554 | @metadata[field] = @stream.readBuffer(@len)
555 |
  # standard genres, looked up by the 1-based index stored in 'gnre'
  genres = [
    "Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge",
    "Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B",
    "Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska",
    "Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient",
    "Trip-Hop", "Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical",
    "Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel", "Noise",
    "AlternRock", "Bass", "Soul", "Punk", "Space", "Meditative", "Instrumental Pop",
    "Instrumental Rock", "Ethnic", "Gothic", "Darkwave", "Techno-Industrial",
    "Electronic", "Pop-Folk", "Eurodance", "Dream", "Southern Rock", "Comedy",
    "Cult", "Gangsta", "Top 40", "Christian Rap", "Pop/Funk", "Jungle",
    "Native American", "Cabaret", "New Wave", "Psychadelic", "Rave", "Showtunes",
    "Trailer", "Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro",
    "Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk/Rock", "National Folk",
    "Swing", "Fast Fusion", "Bebob", "Latin", "Revival", "Celtic", "Bluegrass",
    "Avantgarde", "Gothic Rock", "Progressive Rock", "Psychedelic Rock", "Symphonic Rock",
    "Slow Rock", "Big Band", "Chorus", "Easy Listening", "Acoustic", "Humour", "Speech",
    "Chanson", "Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus",
    "Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba", "Folklore", "Ballad",
    "Power Ballad", "Rhythmic Soul", "Freestyle", "Duet", "Punk Rock", "Drum Solo",
    "A Capella", "Euro-House", "Dance Hall"
  ]

  # standard genre: stored as a 1-based index into the table above
  meta 'gnre', 'genre', (field) ->
    @metadata[field] = genres[@stream.readUInt16() - 1]

  # tempo in beats per minute
  meta 'tmpo', 'tempo', (field) ->
    @metadata[field] = @stream.readUInt16()

  # content rating byte: 2 = Clean, any other nonzero value = Explicit
  meta 'rtng', 'rating', (field) ->
    rating = @stream.readUInt8()
    @metadata[field] = if rating is 2 then 'Clean' else if rating isnt 0 then 'Explicit' else 'None'

  # disk/track numbers are stored as "N of M" pairs of 16-bit integers
  diskTrack = (field) ->
    @stream.advance(2)
    @metadata[field] = @stream.readUInt16() + ' of ' + @stream.readUInt16()
    @stream.advance(@len - 6)

  meta 'disk', 'diskNumber', diskTrack
  meta 'trkn', 'trackNumber', diskTrack

  # boolean flags are stored as a single byte
  bool = (field) ->
    @metadata[field] = @stream.readUInt8() is 1

  meta 'cpil', 'compilation', bool
  meta 'pcst', 'podcast', bool
  meta 'pgap', 'gapless', bool
--------------------------------------------------------------------------------