├── .gitignore
├── AppleAudioUnit.podspec
├── AppleAudioUnit
│   └── Classes
│       ├── AAUParameterState.c
│       ├── AAUParameterState.h
│       ├── AVAudioUnitCreator.swift
│       ├── AppleAudioUnit.h
│       ├── AudioBufferListHelper.c
│       ├── AudioBufferListHelper.h
│       ├── AudioComponentDescription-Cwift.c
│       ├── AudioComponentDescription-Cwift.h
│       ├── BufferedAudioUnit.h
│       ├── BufferedAudioUnit.m
│       ├── EventSlicedAudioUnit.h
│       ├── EventSlicedAudioUnit.m
│       ├── ParameterizedAudioUnit.h
│       └── ParameterizedAudioUnit.m
├── LICENSE
└── README.md

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Xcode
#
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore

**/.DS_Store

## Build generated
build/
DerivedData/

## Various settings
*.pbxuser
!default.pbxuser
*.mode1v3
!default.mode1v3
*.mode2v3
!default.mode2v3
*.perspectivev3
!default.perspectivev3
xcuserdata/

## Other
*.moved-aside
*.xccheckout
*.xcscmblueprint

## Obj-C/Swift specific
*.hmap
*.ipa
*.dSYM.zip
*.dSYM

## Playgrounds
timeline.xctimeline
playground.xcworkspace

# Swift Package Manager
#
# Add this line if you want to avoid checking in source code from Swift Package Manager dependencies.
# Packages/
# Package.pins
# Package.resolved
.build/

# CocoaPods
#
# We recommend against adding the Pods directory to your .gitignore. However
# you should judge for yourself, the pros and cons are mentioned at:
# https://guides.cocoapods.org/using/using-cocoapods.html#should-i-check-the-pods-directory-into-source-control
#
# Pods/

# Carthage
#
# Add this line if you want to avoid checking in source code from Carthage dependencies.
# Carthage/Checkouts

Carthage/Build

# fastlane
#
# It is recommended to not store the screenshots in the git repo. Instead, use fastlane to re-generate the
# screenshots whenever they are needed.
# For more information about the recommended setup visit:
# https://docs.fastlane.tools/best-practices/source-control/#source-control

fastlane/report.xml
fastlane/Preview.html
fastlane/screenshots/**/*.png
fastlane/test_output

--------------------------------------------------------------------------------
/AppleAudioUnit.podspec:
--------------------------------------------------------------------------------
#
# Be sure to run `pod lib lint AppleAudioUnit.podspec' to ensure this is a
# valid spec before submitting.
#
# Any lines starting with a # are optional, but their use is encouraged
# To learn more about a Podspec see https://guides.cocoapods.org/syntax/podspec.html
#

Pod::Spec.new do |s|
  s.name             = 'AppleAudioUnit'
  s.version          = '0.1.2'
  s.summary          = 'A base implementation of Apple\'s AUAudioUnit to simplify the creation of custom audio units.'

  # This description is used to generate tags and improve search results.
  #   * Think: What does it do? Why did you write it? What is the focus?
  #   * Try to keep it short, snappy and to the point.
  #   * Write the description between the DESC delimiters below.
  #   * Finally, don't worry about the indent, CocoaPods strips it!

  s.description      = <<-DESC
In order to create an Apple AUv3 audio unit, an AUAudioUnit subclass must be created. The base AUAudioUnit implementation does not work out of the box. These subclasses manage buffers, per-event rendering, and parameter states, including ramping.
                       DESC

  s.homepage         = 'https://github.com/dave234/AppleAudioUnit'
  # s.screenshots     = 'www.example.com/screenshots_1', 'www.example.com/screenshots_2'
  s.license          = { :type => 'MIT', :file => 'LICENSE' }
  s.author           = { 'Dave O\'Neill' => 'daveoneill234@gmail.com' }
  s.source           = { :git => 'https://github.com/dave234/AppleAudioUnit.git', :tag => s.version.to_s }
  # s.social_media_url = 'https://twitter.com/'

  s.ios.deployment_target = '9.0'
  s.tvos.deployment_target = '9.0'
  s.osx.deployment_target = '10.11'

  s.swift_version = '5.0'
  s.source_files = 'AppleAudioUnit/Classes/**/*'

  # s.resource_bundles = {
  #   'AppleAudioUnit' => ['AppleAudioUnit/Assets/*.png']
  # }

  # s.public_header_files = 'Pod/Classes/**/*.h'
  # s.frameworks = 'AVFoundation', 'AudioToolBox'
  s.dependency 'Cwift'
end

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AAUParameterState.c:
--------------------------------------------------------------------------------
//
//  AAUParameterState.c
//  App
//
//  Created by David O'Neill on 12/15/18.
//

#include "AAUParameterState.h"

void AAUParameterStateSetValue(AAUParameterState *state, float value) {
    state->value = value;
    state->target = value;
    state->increment = 0;
    state->count = 0;
}

void AAUParameterStateSetRamp(AAUParameterState *state, float startValue, float targetValue, int durationInSamples) {
    if (durationInSamples < 1 || startValue == targetValue) {
        return AAUParameterStateSetValue(state, targetValue);
    }

    state->value = startValue;
    state->target = targetValue;
    state->increment = (state->target - state->value) / durationInSamples;
    state->count = durationInSamples;
}

float AAUParameterStateAdvance(AAUParameterState *state, int count) {
    if (state->count < 1) {
        return state->value;
    }

    count = count < state->count ? count : state->count;
    state->value += count * state->increment;
    state->count -= count;
    return state->value;
}

float AAUParameterStateAdvanceOne(AAUParameterState *state) {
    if (state->count < 1) {
        return state->value;
    }

    state->value += state->increment;
    state->count -= 1;
    return state->value;
}

float AAUParameterStateGetValue(AAUParameterState *state) {
    return state->value;
}

CWIFT_BOOL AAUParameterStateIsRamping(AAUParameterState *state) {
    return state->count > 0;
}

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AAUParameterState.h:
--------------------------------------------------------------------------------
//
//  AAUParameterState.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/15/18.
//

#ifndef ParameterState_h
#define ParameterState_h

#include <stdbool.h>
#include <Cwift/Cwift.h>

typedef struct AAUParameterState {
    float value;
    float target;
    float increment;
    int count;
} AAUParameterState;


void AAUParameterStateSetValue(AAUParameterState *state, float value);
void AAUParameterStateSetRamp(AAUParameterState *state, float startValue, float targetValue, int durationInSamples);
float AAUParameterStateAdvance(AAUParameterState *state, int count);
float AAUParameterStateAdvanceOne(AAUParameterState *state);
float AAUParameterStateGetValue(AAUParameterState *state);
CWIFT_BOOL AAUParameterStateIsRamping(AAUParameterState *state);


#endif /* ParameterState_h */

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AVAudioUnitCreator.swift:
--------------------------------------------------------------------------------
//
//  AVAudioUnitCreator.swift
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/15/18.
//  Copyright © 2018 David O'Neill. All rights reserved.
//

import AVFoundation

public class AVAudioUnitCreator {

    private init() {}

    public struct NodeUnit<T> {
        public let node: AVAudioUnit // Inherits from AVAudioNode, naming is weak here.
        public let unit: T
    }

    public static func createAsync<T: AUAudioUnit>(description: AudioComponentDescription,
                                                   name: String? = nil,
                                                   options: AudioComponentInstantiationOptions = [],
                                                   version: UInt32 = 1,
                                                   callback: @escaping (Result<NodeUnit<T>, Error>) -> Void) {

        AUAudioUnit.registerSubclass(T.self, as: description, name: name ?? NSStringFromClass(T.self), version: version)
        AVAudioUnit.instantiate(with: description, options: options) { (avAudioUnit, error) in
            guard
                let avAudioUnit = avAudioUnit,
                let auAudioUnit = avAudioUnit.auAudioUnit as? T else {
                    return callback(.failure(error ?? NSError(domain: "AppleAudioUnit", code: 0,
                                                              userInfo: [NSLocalizedDescriptionKey: "AVAudioUnit.instantiate failed"]) as Error))
            }
            callback(.success(NodeUnit(node: avAudioUnit, unit: auAudioUnit)))
        }
    }

    public static func create<T: AUAudioUnit>(description: AudioComponentDescription,
                                              name: String? = nil,
                                              version: UInt32 = 1,
                                              options: AudioComponentInstantiationOptions = []) throws -> NodeUnit<T> {

        var asyncResult: Result<NodeUnit<T>, Error>?
        let group = DispatchGroup()
        group.enter()
        createAsync(description: description, options: options) { result in
            asyncResult = result
            group.leave()
        }
        group.wait()
        guard let syncResult = asyncResult else { fatalError() }

        switch syncResult {
        case .success(let nodeUnit):
            return NodeUnit(node: nodeUnit.node, unit: nodeUnit.unit)
        case let .failure(error):
            throw error
        }
    }

}

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AppleAudioUnit.h:
--------------------------------------------------------------------------------
//
//  AppleAudioUnit.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 2/10/19.
//

#import "BufferedAudioUnit.h"
#import "ParameterizedAudioUnit.h"
#import "AudioComponentDescription-Cwift.h"
#import "EventSlicedAudioUnit.h"

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AudioBufferListHelper.c:
--------------------------------------------------------------------------------
//
//  AudioBufferListHelper.c
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/16/18.
//

#include "AudioBufferListHelper.h"

// Private AudioBufferList helpers.
void bufferListPrepare(AudioBufferList *audioBufferList,
                       int channelCount,
                       int frameCount) {

    audioBufferList->mNumberBuffers = channelCount;
    for (int channelIndex = 0; channelIndex < channelCount; channelIndex++) {
        audioBufferList->mBuffers[channelIndex].mNumberChannels = 1;
        audioBufferList->mBuffers[channelIndex].mDataByteSize = frameCount * sizeof(float);
    }
}

void bufferListClear(AudioBufferList *audioBufferList) {
    for (int i = 0; i < audioBufferList->mNumberBuffers; i++) {
        memset(audioBufferList->mBuffers[i].mData, 0, audioBufferList->mBuffers[i].mDataByteSize);
    }
}

size_t bufferListByteSize(int channelCount) {
    return sizeof(AudioBufferList) + (sizeof(AudioBuffer) * (channelCount - 1));
}

Boolean bufferListHasNullData(AudioBufferList *bufferList) {
    return bufferList->mBuffers[0].mData == NULL;
}

void bufferListPointChannelDataToBuffer(AudioBufferList *bufferList, float *buffer) {
    int frameCount = bufferList->mBuffers[0].mDataByteSize / sizeof(float);
    for (int channelIndex = 0; channelIndex < bufferList->mNumberBuffers; channelIndex++) {
        int offset = channelIndex * frameCount;
        bufferList->mBuffers[channelIndex].mData = buffer + offset;
    }
}

AudioBufferList *bufferListPrepareSlice(AudioBufferList *bufferList, AudioBufferList *slice, int offset, int frameCount) {
    bufferListPrepare(slice, bufferList->mNumberBuffers, frameCount);
    for (int channelIndex = 0; channelIndex < bufferList->mNumberBuffers; channelIndex++) {
        slice->mBuffers[channelIndex].mData = ((float *)bufferList->mBuffers[channelIndex].mData) + offset;
    }
    return slice;
}

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AudioBufferListHelper.h:
--------------------------------------------------------------------------------
//
//  AudioBufferListHelper.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/16/18.
//

#ifndef AudioBufferListHelper_h
#define AudioBufferListHelper_h

#include <AudioToolbox/AudioToolbox.h>
#include <string.h>

void bufferListPrepare(AudioBufferList *audioBufferList, int channelCount, int frameCount);
void bufferListClear(AudioBufferList *audioBufferList);
size_t bufferListByteSize(int channelCount);
Boolean bufferListHasNullData(AudioBufferList *bufferList);
void bufferListPointChannelDataToBuffer(AudioBufferList *bufferList, float *buffer);

// Prepares slice and points its buffers at bufferList's buffers + offset. Used for partial rendering of bufferList.
AudioBufferList *bufferListPrepareSlice(AudioBufferList *bufferList, AudioBufferList *slice, int offset, int frameCount);
#endif /* AudioBufferListHelper_h */

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AudioComponentDescription-Cwift.c:
--------------------------------------------------------------------------------
//
//  AudioComponentDescription-Cwift.c
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/15/18.
//

#include "AudioComponentDescription-Cwift.h"

static UInt32 fourCharCode(const char *str) {
    return (UInt32)str[0] << 24 | (UInt32)str[1] << 16 | (UInt32)str[2] << 8 | (UInt32)str[3];
}

AudioComponentDescription AudioComponentDescriptionCreate(AudioComponentDescriptionType type,
                                                          FourCharCString fourCharSubType,
                                                          FourCharCString fourCharManufacturer) {

    assert(fourCharSubType != NULL &&
           strlen(fourCharSubType) == 4 &&
           "AudioComponentDescription subType not four chars!");

    assert(fourCharManufacturer != NULL &&
           strlen(fourCharManufacturer) == 4 &&
           "AudioComponentDescription manufacturer not four chars!");

    return (AudioComponentDescription){
        .componentType = type,
        .componentSubType = fourCharCode(fourCharSubType),
        .componentManufacturer = fourCharCode(fourCharManufacturer),
        .componentFlags = 0,
        .componentFlagsMask = 0
    };
}

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/AudioComponentDescription-Cwift.h:
--------------------------------------------------------------------------------
//
//  AudioComponentDescription-Cwift.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/15/18.
//

#ifndef AudioComponentDescription_Cwift_h
#define AudioComponentDescription_Cwift_h

#include <AudioToolbox/AudioToolbox.h>
#include <assert.h>
#include <string.h>
#include <Cwift/Cwift.h>

typedef enum CWIFT_ENUM AudioComponentDescriptionType {
    AudioComponentDescriptionTypeOutput = kAudioUnitType_Output,
    AudioComponentDescriptionTypeMusicDevice = kAudioUnitType_MusicDevice,
    AudioComponentDescriptionTypeMusicEffect = kAudioUnitType_MusicEffect,
    AudioComponentDescriptionTypeFormatConverter = kAudioUnitType_FormatConverter,
    AudioComponentDescriptionTypeEffect = kAudioUnitType_Effect,
    AudioComponentDescriptionTypeMixer = kAudioUnitType_Mixer,
    AudioComponentDescriptionTypePanner = kAudioUnitType_Panner,
    AudioComponentDescriptionTypeGenerator = kAudioUnitType_Generator,
    AudioComponentDescriptionTypeOfflineEffect = kAudioUnitType_OfflineEffect,
    AudioComponentDescriptionTypeMIDIProcessor = kAudioUnitType_MIDIProcessor
} AudioComponentDescriptionType;

// Hack for extending AudioComponentDescription with CWIFT_NAME
typedef AudioComponentDescription _AudioComponentDescriptionExtend_;

// Better hinting with a type.
typedef const char *FourCharCString;

AudioComponentDescription AudioComponentDescriptionCreate(AudioComponentDescriptionType type,
                                                          FourCharCString subType,
                                                          FourCharCString manufacturer)
CWIFT_NAME(_AudioComponentDescriptionExtend_.init(type:subType:manufacturer:));

#endif /* AudioComponentDescription_Cwift_h */

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/BufferedAudioUnit.h:
--------------------------------------------------------------------------------
//
//  BufferedAudioUnit.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/15/18.
//  Copyright © 2018 David O'Neill. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

NS_ASSUME_NONNULL_BEGIN

/** Runs on the render thread: no Swift or Obj-C function/property access, and no ARC-managed references or assignments. */
typedef void(^ProcessEventsBlock)(AudioBufferList * _Nullable inBuffer,
                                  AudioBufferList * _Nonnull outBuffer,
                                  const AudioTimeStamp * _Nonnull timestamp,
                                  AVAudioFrameCount frameCount,
                                  const AURenderEvent * _Nullable eventsListHead);

@interface BufferedAudioUnit : AUAudioUnit

/** Subclasses should override this to return the block that does their processing. */
-(ProcessEventsBlock)processEventsBlock:(AVAudioFormat *)format;

/** If true, an input bus will be allocated; intended for subclasses to override, defaults to true. */
-(BOOL)shouldAllocateInputBus;

/** If true, the output buffer samples will be set to zero pre-render; intended for subclasses to override, defaults to false. */
-(BOOL)shouldClearOutputBuffer;

@end


NS_ASSUME_NONNULL_END

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/BufferedAudioUnit.m:
--------------------------------------------------------------------------------
//
//  BufferedAudioUnit.m
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/15/18.
//  Copyright © 2018 David O'Neill. All rights reserved.
//

#import "BufferedAudioUnit.h"
#import "AudioBufferListHelper.h"

static const int kMaxChannelCount = 16;

@implementation BufferedAudioUnit {
    float *_inputBuffer;
    float *_outputBuffer;
    AUAudioUnitBusArray *_inputBusArray;
    AUAudioUnitBusArray *_outputBusArray;
    ProcessEventsBlock _processEventsBlock;
    BOOL _shouldClearOutputBuffer;
}

- (instancetype)initWithComponentDescription:(AudioComponentDescription)componentDescription
                                     options:(AudioComponentInstantiationOptions)options
                                       error:(NSError **)outError {

    self = [super initWithComponentDescription:componentDescription options:options error:outError];
    if (self != nil) {

        AVAudioFormat *arbitraryFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100 channels:2];
        if ([self shouldAllocateInputBus]) {
            _inputBusArray = [[AUAudioUnitBusArray alloc] initWithAudioUnit:self
                                                                    busType:AUAudioUnitBusTypeInput
                                                                     busses:@[[[AUAudioUnitBus alloc] initWithFormat:arbitraryFormat error:NULL]]];
        }

        _outputBusArray = [[AUAudioUnitBusArray alloc] initWithAudioUnit:self
                                                                 busType:AUAudioUnitBusTypeOutput
                                                                  busses:@[[[AUAudioUnitBus alloc] initWithFormat:arbitraryFormat error:NULL]]];

        _shouldClearOutputBuffer = [self shouldClearOutputBuffer];
    }
    return self;
}

-(BOOL)shouldAllocateInputBus {
    return true;
}

-(BOOL)shouldClearOutputBuffer {
    return false;
}

- (BOOL)allocateRenderResourcesAndReturnError:(NSError **)outError {
    if (![super allocateRenderResourcesAndReturnError:outError]) {
        return NO;
    }

    AVAudioFormat *format = _outputBusArray[0].format;
    if (_inputBusArray != NULL && [_inputBusArray[0].format isEqual:format] == false) {
        if (outError) {
            *outError = [NSError errorWithDomain:NSOSStatusErrorDomain code:kAudioUnitErr_FormatNotSupported userInfo:nil];
        }
        NSLog(@"%@ input format must match output format", self.class);
        self.renderResourcesAllocated = NO;
        return NO;
    }

    assert(_inputBuffer == NULL && _outputBuffer == NULL);

    size_t bufferSize = sizeof(float) * format.channelCount * self.maximumFramesToRender;
    if (self.shouldAllocateInputBus) {
        _inputBuffer = malloc(bufferSize);
    }

    if (self.canProcessInPlace == false || self.shouldAllocateInputBus == false) {
        _outputBuffer = malloc(bufferSize);
    }

    _processEventsBlock = [self processEventsBlock:format];
    return YES;
}

-(void)deallocateRenderResources {
    if (_inputBuffer != NULL) {
        free(_inputBuffer);
    }

    if (_outputBuffer != NULL) {
        free(_outputBuffer);
    }
    _inputBuffer = NULL;
    _outputBuffer = NULL;
    [super deallocateRenderResources];
}

-(ProcessEventsBlock)processEventsBlock:(AVAudioFormat *)format {

    // Default implementation: pass input through to output, or render silence if there is no input.
    return ^(AudioBufferList *inBuffer,
             AudioBufferList *outBuffer,
             const AudioTimeStamp *timestamp,
             AVAudioFrameCount frameCount,
             const AURenderEvent *realtimeEventListHead) {

        if (inBuffer == NULL) {
            for (int i = 0; i < outBuffer->mNumberBuffers; i++) {
                memset(outBuffer->mBuffers[i].mData, 0, outBuffer->mBuffers[i].mDataByteSize);
            }
        } else {
            for (int i = 0; i < inBuffer->mNumberBuffers; i++) {
                memcpy(outBuffer->mBuffers[i].mData, inBuffer->mBuffers[i].mData, inBuffer->mBuffers[i].mDataByteSize);
            }
        }
    };
}

- (AUInternalRenderBlock)internalRenderBlock {

    // Use an untracked pointer and ivars to avoid Obj-C method dispatch and ARC in the render block.
    __unsafe_unretained BufferedAudioUnit *welf = self;
    return ^AUAudioUnitStatus(AudioUnitRenderActionFlags *actionFlags,
                              const AudioTimeStamp *timestamp,
                              AVAudioFrameCount frameCount,
                              NSInteger outputBusNumber,
                              AudioBufferList *outputBufferList,
                              const AURenderEvent *realtimeEventListHead,
                              AURenderPullInputBlock pullInputBlock) {

        // Stack allocation for inputBufferList with char[]; guard against potential stack overflow.
        assert(outputBufferList->mNumberBuffers <= kMaxChannelCount);
        char inputBufferListAllocation[bufferListByteSize(outputBufferList->mNumberBuffers)];
        AudioBufferList *inputBufferList = (AudioBufferList *)inputBufferListAllocation;

        // If hasInput, point to inputBufferList and fill with pullInputBlock().
        bool hasInput = welf->_inputBuffer != NULL;
        if (hasInput) {

            // Prepare buffer for pull input.
            bufferListPrepare(inputBufferList, outputBufferList->mNumberBuffers, frameCount);
            bufferListPointChannelDataToBuffer(inputBufferList, welf->_inputBuffer);

            // Pull input into _inputBuffer.
            AudioUnitRenderActionFlags flags = 0;
            AUAudioUnitStatus status = pullInputBlock(&flags, timestamp, frameCount, 0, inputBufferList);
            if (status)
                return status;
        }

        // If outputBufferList has null data, point to a valid buffer before processing.
        if (bufferListHasNullData(outputBufferList)) {
            float *buffer = welf->_outputBuffer ?: welf->_inputBuffer;
            bufferListPointChannelDataToBuffer(outputBufferList, buffer);
        }

        if (welf->_shouldClearOutputBuffer) {
            bufferListClear(outputBufferList);
        }

        welf->_processEventsBlock(hasInput ? inputBufferList : NULL, outputBufferList, timestamp, frameCount, realtimeEventListHead);
        return noErr;
    };
}

-(AUAudioUnitBusArray *)inputBusses {
    return _inputBusArray;
}

-(AUAudioUnitBusArray *)outputBusses {
    return _outputBusArray;
}

@end

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/EventSlicedAudioUnit.h:
--------------------------------------------------------------------------------
//
//  EventSlicedAudioUnit.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/16/18.
//

#import "BufferedAudioUnit.h"

typedef void(^ProcessSliceBlock)(AudioBufferList * _Nullable inBuffer,
                                 AudioBufferList * _Nonnull outBuffer,
                                 const AudioTimeStamp * _Nonnull timestamp,
                                 AVAudioFrameCount timeStampOffset,
                                 AVAudioFrameCount frameCount);

typedef void(^ProcessMidiEventBlock)(const AudioTimeStamp * _Nonnull timestamp,
                                     AVAudioFrameCount timeStampOffset,
                                     AUMIDIEvent * _Nonnull event,
                                     BOOL isSysEx);

typedef void(^ProcessParameterEventBlock)(const AudioTimeStamp * _Nonnull timestamp,
                                          AVAudioFrameCount timeStampOffset,
                                          AUParameterEvent * _Nonnull event,
                                          BOOL isRamped);

@interface EventSlicedAudioUnit : BufferedAudioUnit

// Overrides for subclasses.
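// Each block defaults to NULL. The MIDI and parameter blocks are invoked once per
// event; processSliceBlock renders the audio between groups of simultaneous events.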
- (ProcessSliceBlock _Nullable)processSliceBlock;
- (ProcessMidiEventBlock _Nullable)processMidiEventBlock;
- (ProcessParameterEventBlock _Nullable)processParameterEventBlock;

@end

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/EventSlicedAudioUnit.m:
--------------------------------------------------------------------------------
//
//  EventSlicedAudioUnit.m
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/16/18.
//

#import "EventSlicedAudioUnit.h"
#import "AudioBufferListHelper.h"

static const int kMaxChannelCount = 16;

@implementation EventSlicedAudioUnit

-(ProcessEventsBlock)processEventsBlock:(AVAudioFormat *)format {

    ProcessSliceBlock processSliceBlock = self.processSliceBlock;
    ProcessMidiEventBlock processMidiEventBlock = self.processMidiEventBlock;
    ProcessParameterEventBlock processParameterEventBlock = self.processParameterEventBlock;
    BOOL hasInput = self.shouldAllocateInputBus;

    if (processSliceBlock == NULL && processMidiEventBlock == NULL && processParameterEventBlock == NULL) {
        return [super processEventsBlock:format];
    }

    // This calls the subclass's processMidiEventBlock and processParameterEventBlock, then processSliceBlock.
    return ^(AudioBufferList *inBuffer,
             AudioBufferList *outBuffer,
             const AudioTimeStamp *timestamp,
             AVAudioFrameCount frameCount,
             const AURenderEvent *eventsListHead) {

        // If no events, process in one slice.
        if (eventsListHead == NULL) {
            if (processSliceBlock != NULL) {
                processSliceBlock(inBuffer, outBuffer, timestamp, 0, frameCount);
            }
            return;
        }

        // Guard against stack overflow in the variable stack allocation below.
        assert(outBuffer->mNumberBuffers <= kMaxChannelCount);

        // inSliceMem and outSliceMem are stack allocations for inBufferSlice and outBufferSlice.
        // We only need a local var for our slices, but AudioBufferList is a variable-sized struct.
        char inSliceMem[bufferListByteSize(outBuffer->mNumberBuffers)];
        char outSliceMem[bufferListByteSize(outBuffer->mNumberBuffers)];

        AudioBufferList *inBufferSlice = hasInput ? (AudioBufferList *)inSliceMem : NULL;
        AudioBufferList *outBufferSlice = (AudioBufferList *)outSliceMem;

        // Iterate the linked list of events, call the MIDI or parameter block for each event, and render slices of audio in between non-simultaneous events.
        AURenderEvent *event = (AURenderEvent *)eventsListHead;
        while (event != NULL) {

            // Cast to header in order to get type and eventSampleTime.
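            // eventSampleTime is absolute; subtracting the buffer's start sample time
            // gives the event's offset within this render cycle's buffers.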
            AURenderEventHeader *header = &event->head;
            AVAudioFrameCount timestampOffset = (AVAudioFrameCount)(header->eventSampleTime - timestamp->mSampleTime);
            AURenderEventType type = header->eventType;

            switch (type) {
                case AURenderEventMIDI:
                case AURenderEventMIDISysEx:
                    if (processMidiEventBlock != NULL) {
                        processMidiEventBlock(timestamp, timestampOffset, &event->MIDI, type == AURenderEventMIDISysEx);
                    }
                    break;

                case AURenderEventParameter:
                case AURenderEventParameterRamp:
                    if (processParameterEventBlock != NULL) {
                        processParameterEventBlock(timestamp, timestampOffset, &event->parameter, type == AURenderEventParameterRamp);
                    }
                    break;

                default:
                    break;
            }

            // If this is the last event, or the next event has a different sample time, render a slice.
            AURenderEventHeader *nextHeader = header->next ? &header->next->head : NULL;
            if (processSliceBlock != NULL && (!nextHeader || nextHeader->eventSampleTime != header->eventSampleTime)) {

                // Render to the next event's sample time, or to the end if there is no next event.
                AVAudioFramePosition sliceEnd = nextHeader == NULL ? timestamp->mSampleTime + frameCount : nextHeader->eventSampleTime;
                AVAudioFrameCount sliceFrameCount = (AVAudioFrameCount)(sliceEnd - header->eventSampleTime);

                if (hasInput)
                    bufferListPrepareSlice(inBuffer, (AudioBufferList *)inSliceMem, timestampOffset, sliceFrameCount);

                bufferListPrepareSlice(outBuffer, outBufferSlice, timestampOffset, sliceFrameCount);
                processSliceBlock(inBufferSlice, outBufferSlice, timestamp, timestampOffset, sliceFrameCount);
            }

            event = header->next;
        }

    };
}

- (ProcessSliceBlock)processSliceBlock {
    return NULL;
}

- (ProcessMidiEventBlock)processMidiEventBlock {
    return NULL;
}

- (ProcessParameterEventBlock)processParameterEventBlock {
    return NULL;
}

@end

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/ParameterizedAudioUnit.h:
--------------------------------------------------------------------------------
//
//  ParameterizedAudioUnit.h
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/16/18.
//

#import "AAUParameterState.h"
#import "EventSlicedAudioUnit.h"

NS_ASSUME_NONNULL_BEGIN

typedef struct RenderParameter {
    float value;
    float isRamping;
    float isChanged;
} RenderParameter;

typedef void(^ProcessAudioBlock)(float * _Nullable * _Nonnull inChannels,
                                 float * _Nonnull * _Nonnull outChannels,
                                 int channelCount,
                                 const AudioTimeStamp * _Nonnull timestamp,
                                 int timeStampOffset,
                                 int frameCount,
                                 float *parameters);

@interface ParameterizedAudioUnit : EventSlicedAudioUnit
@property int rampingSliceSize; // Defaults to 16.
@property BOOL isImplicitRampingEnabled; // Defaults to yes.
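// Assign a parameterTree whose addresses run 0..(count - 1); values arriving from the
// UI or from parameter events are ramped automatically and delivered to
// processAudioBlock's parameters argument.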
- (void)setParameterTree:(AUParameterTree *)parameterTree;
- (void)setRampDuration:(float)seconds parameterAddress:(AUParameterAddress)address;
- (AUParameter * _Nullable)parameterWithAddress:(AUParameterAddress)address;
- (ProcessAudioBlock _Nullable)processAudioBlock;
@end

NS_ASSUME_NONNULL_END

--------------------------------------------------------------------------------
/AppleAudioUnit/Classes/ParameterizedAudioUnit.m:
--------------------------------------------------------------------------------
//
//  ParameterizedAudioUnit.m
//  AppleAudioUnit
//
//  Created by David O'Neill on 12/16/18.
//

#import "ParameterizedAudioUnit.h"
#import <pthread.h>

static const float kDefaultRampDurationSeconds = 0.03;
static const int kDefaultRampSliceSize = 16;

typedef struct ParameterUIState {
    CWIFT_BOOL uiSetValue;
    float uiValue;
    float uiRampDuration;
} ParameterUIState;

@implementation ParameterizedAudioUnit {
    int _parameterCount;
    pthread_mutex_t _renderMutex;
    ParameterUIState *_parameterUIStates;
    float _renderSampleRate;
    AAUParameterState *_parameterStates;
}

@synthesize parameterTree = _parameterTree;

- (instancetype)initWithComponentDescription:(AudioComponentDescription)componentDescription
                                     options:(AudioComponentInstantiationOptions)options
                                       error:(NSError **)outError {
    self = [super initWithComponentDescription:componentDescription options:options error:outError];
    _rampingSliceSize = kDefaultRampSliceSize;
    _isImplicitRampingEnabled = true;
    _parameterTree = self.parameterTree; // Maintain super's intended override mechanism.
    if (self && _parameterTree && [self validateParameters:outError]) {
        [self createParameterStates];
    }
    return self;
}

- (void)setParameterTree:(AUParameterTree *)parameterTree {
    [self destroyParameterStates];
    _parameterTree = parameterTree;
    [self createParameterStates];
}

- (AUParameterTree *)parameterTree {
    return _parameterTree;
}

- (void)createParameterStates {
    [self destroyParameterStates];

    if (_parameterTree == NULL) {
        return;
    }

    _parameterCount = (int)_parameterTree.allParameters.count;
    if (_parameterCount <= 0) {
        return;
    }

    pthread_mutex_init(&_renderMutex, NULL);
    _parameterStates = malloc(sizeof(AAUParameterState) * _parameterCount);
    _parameterUIStates = malloc(sizeof(ParameterUIState) * _parameterCount);

    for (int i = 0; i < _parameterCount; i++) {
        AUParameter *parameter = [_parameterTree parameterWithAddress:i];
        _parameterUIStates[i].uiSetValue = 0;
        _parameterUIStates[i].uiValue = 0;
        _parameterUIStates[i].uiRampDuration = kDefaultRampDurationSeconds;
        AAUParameterStateSetValue(&_parameterStates[i], parameter.value);
    }

    __unsafe_unretained ParameterizedAudioUnit *welf = self;
    _parameterTree.implementorValueObserver = ^(AUParameter * _Nonnull param, AUValue value) {
        pthread_mutex_lock(&welf->_renderMutex);
        welf->_parameterUIStates[param.address].uiSetValue = true;
        welf->_parameterUIStates[param.address].uiValue = value;
        pthread_mutex_unlock(&welf->_renderMutex);
    };

    _parameterTree.implementorValueProvider = ^AUValue(AUParameter * _Nonnull param) {
        return AAUParameterStateGetValue(&welf->_parameterStates[param.address]);
    };

}

-(void)destroyParameterStates {
    if (_parameterStates != NULL)
        free(_parameterStates);

    if (_parameterUIStates != NULL)
        free(_parameterUIStates);

    _parameterStates = NULL;
    _parameterUIStates = NULL;
}

-(void)dealloc {
    pthread_mutex_destroy(&_renderMutex);
    [self destroyParameterStates];
}

- (void)setRampDuration:(float)seconds parameterAddress:(AUParameterAddress)address {
    NSAssert(address < (AUParameterAddress)_parameterCount, @"Invalid parameter address");
    ParameterUIState *state = &_parameterUIStates[address];
    state->uiRampDuration = seconds;
}

- (BOOL)validateParameters:(NSError **)outError {
    NSArray<AUParameter *> *parameters = [_parameterTree.allParameters sortedArrayUsingComparator:^NSComparisonResult(AUParameter *p1, AUParameter *p2) {
        return p1.address < p2.address ? NSOrderedAscending : p1.address > p2.address ? NSOrderedDescending : NSOrderedSame;
    }];
    if (parameters.count > 0 && (parameters.firstObject.address != 0 || parameters.lastObject.address != parameters.count - 1)) {

        NSString *message = @"ParameterizedAudioUnit requires parameter addresses \
to be a sequence starting at 0 and incrementing by 1, example: [0, 1, 2, 3, 4]";
        if (outError) {
            *outError = [NSError errorWithDomain:@"AppleAudioUnit" code:0 userInfo:@{NSLocalizedDescriptionKey: message}];
        } else {
            NSLog(@"%@", message);
        }
        return false;
    }
    return true;
}

-(BOOL)allocateRenderResourcesAndReturnError:(NSError * _Nullable __autoreleasing *)outError {
    if (![super allocateRenderResourcesAndReturnError:outError]) {
        return false;
    }
    _renderSampleRate = self.outputBusses[0].format.sampleRate;
    return true;
}

static void preRenderParameterUpdate(__unsafe_unretained ParameterizedAudioUnit *welf) {
    if (welf->_parameterCount < 1 || pthread_mutex_trylock(&welf->_renderMutex) != 0) {
        return;
    }
    for (int i = 0; i < welf->_parameterCount; i++) {
        ParameterUIState *uiState = &welf->_parameterUIStates[i];
        AAUParameterState *renderState = &welf->_parameterStates[i];
        if (!uiState->uiSetValue)
            continue;

        if (uiState->uiRampDuration != 0) {
            AAUParameterStateSetRamp(renderState,
                                     renderState->value,
                                     uiState->uiValue,
                                     uiState->uiRampDuration * welf->_renderSampleRate);
        } else {
            AAUParameterStateSetValue(renderState, uiState->uiValue);
        }
        uiState->uiSetValue = false;
    }
    pthread_mutex_unlock(&welf->_renderMutex);
}

- (ProcessEventsBlock)processEventsBlock:(AVAudioFormat *)format {

    ProcessEventsBlock superProcessEvents = [super processEventsBlock:format];
    __unsafe_unretained ParameterizedAudioUnit *welf = self;
    return ^(AudioBufferList *inBuffer,
             AudioBufferList *outBuffer,
             const AudioTimeStamp *timestamp,
             AVAudioFrameCount frameCount,
             const AURenderEvent *eventsListHead) {

        preRenderParameterUpdate(welf);
        superProcessEvents(inBuffer, outBuffer, timestamp, frameCount, eventsListHead);
    };
}

//-(AAUParameterState *)parameterStates {
//    return _parameterStates;
//}

-(ProcessParameterEventBlock)processParameterEventBlock {

    __unsafe_unretained ParameterizedAudioUnit *welf = self;
    BOOL isImplicitRampingEnabled = self.isImplicitRampingEnabled;

    return ^(const AudioTimeStamp * _Nonnull timestamp,
             AVAudioFrameCount timeStampOffset,
             AUParameterEvent * _Nonnull event,
             BOOL
             isRamped) {

        AAUParameterState *parameterState = &welf->_parameterStates[event->parameterAddress];
        if (isImplicitRampingEnabled && isRamped) {
            AAUParameterStateSetRamp(parameterState, parameterState->value, event->value, event->rampDurationSampleFrames);
        } else {
            AAUParameterStateSetValue(parameterState, event->value);
        }
    };
}

-(ProcessSliceBlock)processSliceBlock {
    ProcessAudioBlock processAudioBlock = self.processAudioBlock;
    if (!processAudioBlock) {
        return NULL;
    }
    __unsafe_unretained ParameterizedAudioUnit *welf = self;
    int parameterCount = (int)_parameterTree.allParameters.count;
    BOOL hasInput = self.shouldAllocateInputBus;
    return ^(AudioBufferList * _Nullable inBuffer,
             AudioBufferList * _Nonnull outBuffer,
             const AudioTimeStamp * _Nonnull timestamp,
             AVAudioFrameCount timeStampOffset,
             AVAudioFrameCount frameCount) {

        int channelCount = outBuffer->mNumberBuffers;
        float parameters[MAX(parameterCount, 1)];

        // Copy parameter values to the parameter buffer. Check if ramping.
        BOOL isRamping = false;
        for (int i = 0; i < parameterCount; i++) {
            parameters[i] = welf->_parameterStates[i].value;
            isRamping = isRamping || AAUParameterStateIsRamping(&welf->_parameterStates[i]);
        }

        // Map bufferlist to channel buffers.
        float *inChannels[channelCount];
        float *outChannels[channelCount];
        for (int i = 0; i < channelCount; i++) {
            if (hasInput) {
                inChannels[i] = inBuffer->mBuffers[i].mData;
            }
            outChannels[i] = outBuffer->mBuffers[i].mData;
        }

        if (isRamping == false) {
            // Process buffers in one slice.
            processAudioBlock(hasInput ? inChannels : NULL, outChannels, channelCount, timestamp, timeStampOffset, frameCount, parameters);
        } else {

            // Process audio in chunks no larger than _rampingSliceSize frames.
            int sliceSize = welf->_rampingSliceSize;
            int i = 0;

            while (i < frameCount) {

                sliceSize = MIN(sliceSize, frameCount - i);
                processAudioBlock(hasInput ? inChannels : NULL, outChannels, channelCount, timestamp, timeStampOffset + i, sliceSize, parameters);

                // Advance parameter states.
                for (int p = 0; p < parameterCount; p++) {
                    parameters[p] = AAUParameterStateAdvance(&welf->_parameterStates[p], sliceSize);
                }

                // Advance channel buffer pointers.
                for (int c = 0; c < channelCount; c++) {
                    if (hasInput) {
                        inChannels[c] += sliceSize;
                    }
                    outChannels[c] += sliceSize;
                }

                i += sliceSize;
            }
        }
    };
}

- (ProcessAudioBlock)processAudioBlock {
    return NULL;
}

-(AUParameter *)parameterWithAddress:(AUParameterAddress)address {
    if (_parameterTree == NULL)
        return NULL;

    return [_parameterTree parameterWithAddress:address];
}

@end

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 Dave O'Neill

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# AppleAudioUnit

This is very much a WIP, but here's how it works at the moment.

Implementing an AUAudioUnit requires some boilerplate and some domain-specific knowledge. These classes take an opinionated approach to aiding in the implementation of the essential and most common use cases. Here are the classes, in order of parent -> child.


### [BufferedAudioUnit][0]
BufferedAudioUnit ensures that there are always valid buffers to render to. It allocates memory and implements internalRenderBlock, where it pulls the input block and then calls its own processEventsBlock, which supplies a single input buffer (for effects) and a single output buffer to read from and write to. processEventsBlock is intended to be implemented by subclasses.

### [EventSlicedAudioUnit][1]
In order to achieve sample accuracy with regard to MIDI and parameter events, a partial rendering strategy is used. EventSlicedAudioUnit implements processEventsBlock, where it parses each event, calling either processMidiEventBlock or processParameterEventBlock for each simultaneous event, then calling processSliceBlock with a frame count that extends to either the next group of simultaneous events or the end of the render cycle.
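As a concrete illustration, here is a minimal sketch of a BufferedAudioUnit subclass. The class name and the fixed 0.5 gain are hypothetical, not part of the library; per the note in BufferedAudioUnit.h, the returned block sticks to plain C, with no Obj-C or Swift calls and no ARC-managed references:

```objc
#import "AppleAudioUnit.h"

// Hypothetical effect: copies input to output at half volume.
@interface HalfGainAudioUnit : BufferedAudioUnit
@end

@implementation HalfGainAudioUnit

- (ProcessEventsBlock)processEventsBlock:(AVAudioFormat *)format {
    return ^(AudioBufferList *inBuffer,
             AudioBufferList *outBuffer,
             const AudioTimeStamp *timestamp,
             AVAudioFrameCount frameCount,
             const AURenderEvent *eventsListHead) {

        // inBuffer is non-NULL here because shouldAllocateInputBus defaults to true.
        for (int channel = 0; channel < (int)outBuffer->mNumberBuffers; channel++) {
            const float *input = inBuffer->mBuffers[channel].mData;
            float *output = outBuffer->mBuffers[channel].mData;
            for (AVAudioFrameCount frame = 0; frame < frameCount; frame++) {
                output[frame] = input[frame] * 0.5f;
            }
        }
    };
}

@end
```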
### [ParameterizedAudioUnit][2]
This very opinionated class handles the backing memory, ramping, and threading considerations necessary to implement audio unit parameters. It does so by inspecting the subclass's `parameterTree` property, then allocating an array of data structures that are used for managing each parameter's value and ramping state. It implements all three of EventSlicedAudioUnit's blocks and calls its own processAudioBlock, which has a much simpler function signature. processAudioBlock is sliced even further for parameter ramping and is intended to be implemented by subclasses, which access only the current parameter values without needing to ramp and slice manually. This is perhaps the most opinionated bit: all parameters returned by a subclass's `parameterTree` property must use parameter addresses starting at 0 and incrementing to (parameter count - 1). Then, within the processAudioBlock, parameters are retrieved from a `float *` by subscripting with the address. See the gain-effect sketch at the end of this README.

### [AppleAudioUnit][3]
This class isn't working yet, so it probably shouldn't be in here, but the idea is that it is a C interface to all of the functionality described above. It will work by passing around pointers to a DSP object and calling function-pointer callbacks with that DSP object. The ultimate goal of this class is to fully separate the Apple platform code from the DSP code in order to promote cross-platform DSP implementations and integrations.

During the POC of AppleAudioUnit I'm slowly coming to the conclusion that it may be too opinionated for some, but I feel that it will handily suit the 80% use case, so I will continue to develop this when I find time.

[0]:https://github.com/dave234/AppleAudioUnit/blob/master/AppleAudioUnit/Classes/BufferedAudioUnit.h
[1]:https://github.com/dave234/AppleAudioUnit/blob/master/AppleAudioUnit/Classes/EventSlicedAudioUnit.h
[2]:https://github.com/dave234/AppleAudioUnit/blob/master/AppleAudioUnit/Classes/ParameterizedAudioUnit.h
[3]:https://github.com/dave234/AppleAudioUnit/blob/master/AppleAudioUnit/Classes/AppleAudioUnit.h
--------------------------------------------------------------------------------
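
### Example: a one-parameter gain effect (sketch)

A rough sketch of how the pieces fit together with ParameterizedAudioUnit. The class name and parameter setup are illustrative assumptions, not part of the library; the parameter uses address 0 to satisfy the sequential-address rule described above.

```objc
#import "AppleAudioUnit.h"

// Hypothetical one-parameter gain effect.
@interface GainAudioUnit : ParameterizedAudioUnit
@end

@implementation GainAudioUnit

- (instancetype)initWithComponentDescription:(AudioComponentDescription)componentDescription
                                     options:(AudioComponentInstantiationOptions)options
                                       error:(NSError **)outError {
    self = [super initWithComponentDescription:componentDescription options:options error:outError];
    if (self) {
        // Addresses must start at 0 and increment by 1.
        AUParameter *gain = [AUParameterTree createParameterWithIdentifier:@"gain"
                                                                      name:@"Gain"
                                                                   address:0
                                                                       min:0
                                                                       max:1
                                                                      unit:kAudioUnitParameterUnit_LinearGain
                                                                  unitName:nil
                                                                     flags:kAudioUnitParameterFlag_IsWritable | kAudioUnitParameterFlag_CanRamp
                                                              valueStrings:nil
                                                       dependentParameters:nil];
        self.parameterTree = [AUParameterTree createTreeWithChildren:@[gain]];
    }
    return self;
}

- (ProcessAudioBlock)processAudioBlock {
    return ^(float **inChannels,
             float **outChannels,
             int channelCount,
             const AudioTimeStamp *timestamp,
             int timeStampOffset,
             int frameCount,
             float *parameters) {

        // Ramping and slicing are handled by ParameterizedAudioUnit;
        // parameters[0] already holds the (possibly ramped) value for this slice.
        for (int channel = 0; channel < channelCount; channel++) {
            for (int frame = 0; frame < frameCount; frame++) {
                outChannels[channel][frame] = inChannels[channel][frame] * parameters[0];
            }
        }
    };
}

@end
```

From Swift, a subclass like this could then be registered and wired into an AVAudioEngine through AVAudioUnitCreator.create, with the component description built via the Cwift-named AudioComponentDescription initializer.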