diff --git a/Novocaine/AudioFileReader.h b/Novocaine/AudioFileReader.h new file mode 100755 index 0000000..9c6e494 --- /dev/null +++ b/Novocaine/AudioFileReader.h @@ -0,0 +1,64 @@ +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+ +#import +#import "RingBuffer.h" +#import "Novocaine.h" + + +@interface AudioFileReader : NSObject +{ + float currentTime; + float duration; + float samplingRate; + float latency; + UInt32 numChannels; + NSURL *audioFileURL; + + InputBlock readerBlock; + + BOOL playing; +} + +@property (getter=getCurrentTime, setter=setCurrentTime:) float currentTime; +@property (readonly, getter=getDuration) float duration; +@property float samplingRate; +@property UInt32 numChannels; +@property float latency; +@property (nonatomic, copy) NSURL *audioFileURL; +@property (nonatomic, copy) InputBlock readerBlock; +@property BOOL playing; + + +- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels; + +// You use this method to grab audio if you have your own callback. +// The buffer'll fill at the speed the audio is normally being played. +- (void)retrieveFreshAudio:(float *)buffer numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels; +//- (float)getCurrentTime; +- (void)play; +- (void)pause; +- (void)stop; + + +@end diff --git a/Novocaine/AudioFileReader.mm b/Novocaine/AudioFileReader.mm new file mode 100755 index 0000000..9f5c0bd --- /dev/null +++ b/Novocaine/AudioFileReader.mm @@ -0,0 +1,296 @@ +// +// AudioFileReader.m +// Novocaine +// +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. + +#import "AudioFileReader.h" + +@interface AudioFileReader () +{ + RingBuffer *ringBuffer; +} + +@property AudioStreamBasicDescription outputFormat; +@property ExtAudioFileRef inputFile; +@property UInt32 outputBufferSize; +@property float *outputBuffer; +@property float *holdingBuffer; +@property UInt32 numSamplesReadPerPacket; +@property UInt32 desiredPrebufferedSamples; +@property SInt64 currentFileTime; +@property dispatch_source_t callbackTimer; + + +- (void)bufferNewAudio; + +@end + + + +@implementation AudioFileReader + +@synthesize outputFormat = _outputFormat; +@synthesize inputFile = _inputFile; +@synthesize outputBuffer = _outputBuffer; +@synthesize holdingBuffer = _holdingBuffer; +@synthesize outputBufferSize = _outputBufferSize; +@synthesize numSamplesReadPerPacket = _numSamplesReadPerPacket; +@synthesize desiredPrebufferedSamples = _desiredPrebufferedSamples; +@synthesize currentFileTime = _currentFileTime; +@synthesize callbackTimer = _callbackTimer; +@synthesize currentTime = _currentTime; +@synthesize duration = _duration; +@synthesize samplingRate = _samplingRate; +@synthesize latency = _latency; +@synthesize numChannels = _numChannels; +@synthesize audioFileURL = _audioFileURL; +@synthesize readerBlock = _readerBlock; +@synthesize playing = _playing; + +- (void)dealloc +{ + // If the dispatch timer is active, close it off + if (self.playing) + [self pause]; + + self.readerBlock = nil; + + // Close the ExtAudioFile + ExtAudioFileDispose(self.inputFile); + + 
free(self.outputBuffer); + free(self.holdingBuffer); + + delete ringBuffer; + + [super dealloc]; +} + + +- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels +{ + self = [super init]; + if (self) + { + + // Zero-out our timer, so we know we're not using our callback yet + self.callbackTimer = nil; + + + // Open a reference to the audio file + self.audioFileURL = urlToAudioFile; + CFURLRef audioFileRef = (CFURLRef)self.audioFileURL; + CheckError(ExtAudioFileOpenURL(audioFileRef, &_inputFile), "Opening file URL (ExtAudioFileOpenURL)"); + + + // Set a few defaults and presets + self.samplingRate = thisSamplingRate; + self.numChannels = thisNumChannels; + self.latency = .011609977; // 512 samples / ( 44100 samples / sec ) default + + + // We're going to impose a format upon the input file + // Single-channel float does the trick. + _outputFormat.mSampleRate = self.samplingRate; + _outputFormat.mFormatID = kAudioFormatLinearPCM; + _outputFormat.mFormatFlags = kAudioFormatFlagIsFloat; + _outputFormat.mBytesPerPacket = 4*self.numChannels; + _outputFormat.mFramesPerPacket = 1; + _outputFormat.mBytesPerFrame = 4*self.numChannels; + _outputFormat.mChannelsPerFrame = self.numChannels; + _outputFormat.mBitsPerChannel = 32; + + // Apply the format to our file + ExtAudioFileSetProperty(_inputFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &_outputFormat); + + + // Arbitrary buffer sizes that don't matter so much as long as they're "big enough" + self.outputBufferSize = 65536; + self.numSamplesReadPerPacket = 8192; + self.desiredPrebufferedSamples = self.numSamplesReadPerPacket*2; + self.outputBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); + self.holdingBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); + + + // Allocate a ring buffer (this is what's going to buffer our audio) + ringBuffer = new RingBuffer(self.outputBufferSize, self.numChannels); + 
+ + // Fill up the buffers, so we're ready to play immediately + [self bufferNewAudio]; + + } + return self; +} + +- (void)clearBuffer +{ + ringBuffer->Clear(); +} + +- (void)bufferNewAudio +{ + + if (ringBuffer->NumUnreadFrames() > self.desiredPrebufferedSamples) + return; + + memset(self.outputBuffer, 0, sizeof(float)*self.desiredPrebufferedSamples); + + AudioBufferList incomingAudio; + incomingAudio.mNumberBuffers = 1; + incomingAudio.mBuffers[0].mNumberChannels = self.numChannels; + incomingAudio.mBuffers[0].mDataByteSize = self.outputBufferSize; + incomingAudio.mBuffers[0].mData = self.outputBuffer; + + // Figure out where we are in the file + SInt64 frameOffset = 0; + ExtAudioFileTell(self.inputFile, &frameOffset); + self.currentFileTime = (float)frameOffset / self.samplingRate; + + // Read the audio + UInt32 framesRead = self.numSamplesReadPerPacket; + ExtAudioFileRead(self.inputFile, &framesRead, &incomingAudio); + + // Update where we are in the file + ExtAudioFileTell(self.inputFile, &frameOffset); + self.currentFileTime = (float)frameOffset / self.samplingRate; + + // Add the new audio to the ring buffer + ringBuffer->AddNewInterleavedFloatData(self.outputBuffer, framesRead, self.numChannels); + + if ((self.currentFileTime - self.duration) < 0.01 && framesRead == 0) { + // modified to allow for auto-stopping. // + // Need to change your output block to check for [fileReader playing] and nuke your fileReader if it is // + // not playing and not paused, on the next frame. Otherwise, the sound clip's final buffer is not played. 
// +// self.currentTime = 0.0f; + [self stop]; + ringBuffer->Clear(); + } + + +} + +- (float)getCurrentTime +{ + return self.currentFileTime - ringBuffer->NumUnreadFrames()/self.samplingRate; +} + + +- (void)setCurrentTime:(float)thisCurrentTime +{ + dispatch_async(dispatch_get_main_queue(), ^{ + [self pause]; + ExtAudioFileSeek(self.inputFile, thisCurrentTime*self.samplingRate); + + [self clearBuffer]; + [self bufferNewAudio]; + + [self play]; + }); +} + +- (float)getDuration +{ + // We're going to directly calculate the duration of the audio file (in seconds) + SInt64 framesInThisFile; + UInt32 propertySize = sizeof(framesInThisFile); + ExtAudioFileGetProperty(self.inputFile, kExtAudioFileProperty_FileLengthFrames, &propertySize, &framesInThisFile); + + AudioStreamBasicDescription fileStreamFormat; + propertySize = sizeof(AudioStreamBasicDescription); + ExtAudioFileGetProperty(self.inputFile, kExtAudioFileProperty_FileDataFormat, &propertySize, &fileStreamFormat); + + return (float)framesInThisFile/(float)fileStreamFormat.mSampleRate; + +} + +- (void)configureReaderCallback +{ + + if (!self.callbackTimer) + { + self.callbackTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); + UInt32 numSamplesPerCallback = (UInt32)( self.latency * self.samplingRate ); + dispatch_source_set_timer(self.callbackTimer, dispatch_walltime(NULL, 0), self.latency*NSEC_PER_SEC, 0); + dispatch_source_set_event_handler(self.callbackTimer, ^{ + + if (self.playing) { + + if (self.readerBlock) { + // Suck some audio down from our ring buffer + [self retrieveFreshAudio:self.holdingBuffer numFrames:numSamplesPerCallback numChannels:self.numChannels]; + + // Call out with the audio that we've got. 
+ self.readerBlock(self.holdingBuffer, numSamplesPerCallback, self.numChannels); + } + + // Asynchronously fill up the buffer (if it needs filling) + dispatch_async(dispatch_get_main_queue(), ^{ + [self bufferNewAudio]; + }); + + } + + }); + + dispatch_resume(self.callbackTimer); + } +} + + +- (void)retrieveFreshAudio:(float *)buffer numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels +{ + ringBuffer->FetchInterleavedData(buffer, thisNumFrames, thisNumChannels); +} + + +- (void)play; +{ + + // Configure (or if necessary, create and start) the timer for retrieving audio + if (!self.playing) { + [self configureReaderCallback]; + self.playing = TRUE; + } + +} + +- (void)pause +{ + // Pause the dispatch timer for retrieving the MP3 audio + self.playing = FALSE; +} + +- (void)stop +{ + // Release the dispatch timer because it holds a reference to this class instance + [self pause]; + if (self.callbackTimer) { + dispatch_release(self.callbackTimer); + } +} + + +@end diff --git a/Novocaine/AudioFileWriter.h b/Novocaine/AudioFileWriter.h new file mode 100755 index 0000000..3f3d051 --- /dev/null +++ b/Novocaine/AudioFileWriter.h @@ -0,0 +1,66 @@ +// +// AudioFileWriter.h +// Novocaine +// +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+
+#import <Foundation/Foundation.h>
+#import "Novocaine.h"
+
+
+@interface AudioFileWriter : NSObject
+{
+    float currentTime;
+    float duration;
+    float samplingRate;
+    float latency;
+    UInt32 numChannels;
+    NSURL *audioFileURL;
+
+    OutputBlock writerBlock;
+
+    BOOL recording;
+}
+
+@property (readonly) float currentTime;
+@property (getter=getDuration) float duration;
+@property float samplingRate;
+@property UInt32 numChannels;
+@property float latency;
+@property (nonatomic, copy) NSURL *audioFileURL;
+@property (nonatomic, copy) OutputBlock writerBlock;
+@property BOOL recording;
+
+
+- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels;
+
+// You use this method to grab audio if you have your own callback.
+// The buffer'll fill at the speed the audio is normally being played.
+- (void)writeNewAudio:(float *)newData numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels; + +- (void)record; +- (void)pause; +- (void)stop; + +@end diff --git a/Novocaine/AudioFileWriter.m b/Novocaine/AudioFileWriter.m new file mode 100755 index 0000000..5500ca0 --- /dev/null +++ b/Novocaine/AudioFileWriter.m @@ -0,0 +1,247 @@ +// +// AudioFileWriter.m +// Novocaine +// +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+#import "AudioFileWriter.h" +#import + +@interface AudioFileWriter() + +@property AudioStreamBasicDescription outputFormat; +@property ExtAudioFileRef outputFile; +@property UInt32 outputBufferSize; +@property float *outputBuffer; +@property float *holdingBuffer; +@property SInt64 currentFileTime; +@property dispatch_source_t callbackTimer; +@property (readwrite) float currentTime; + +@end + + + +@implementation AudioFileWriter + +static pthread_mutex_t outputAudioFileLock; + +@synthesize outputFormat = _outputFormat; +@synthesize outputFile = _outputFile; +@synthesize outputBuffer = _outputBuffer; +@synthesize holdingBuffer = _holdingBuffer; +@synthesize outputBufferSize = _outputBufferSize; +@synthesize currentFileTime = _currentFileTime; +@synthesize callbackTimer = _callbackTimer; + +@synthesize currentTime = _currentTime; +@synthesize duration = _duration; +@synthesize samplingRate = _samplingRate; +@synthesize latency = _latency; +@synthesize numChannels = _numChannels; +@synthesize audioFileURL = _audioFileURL; +@synthesize writerBlock = _writerBlock; +@synthesize recording = _recording; + +- (void)dealloc +{ + [self stop]; + + free(self.outputBuffer); + free(self.holdingBuffer); + + [super dealloc]; +} + +- (id)initWithAudioFileURL:(NSURL *)urlToAudioFile samplingRate:(float)thisSamplingRate numChannels:(UInt32)thisNumChannels +{ + if(thisNumChannels == 0){ + NSLog(@"WARNING. AudioFileWriter initialized with numChannels of zero. 
This is bad."); + } + + self = [super init]; + if (self) + { + + // Zero-out our timer, so we know we're not using our callback yet + self.callbackTimer = nil; + + + // Open a reference to the audio file + self.audioFileURL = urlToAudioFile; + CFURLRef audioFileRef = (CFURLRef)self.audioFileURL; + + AudioStreamBasicDescription outputFileDesc = {44100.0, kAudioFormatMPEG4AAC, 0, 0, 1024, 0, thisNumChannels, 0, 0}; + + CheckError(ExtAudioFileCreateWithURL(audioFileRef, kAudioFileM4AType, &outputFileDesc, NULL, kAudioFileFlags_EraseFile, &_outputFile), "Creating file"); + + + // Set a few defaults and presets + self.samplingRate = thisSamplingRate; + self.numChannels = thisNumChannels; + self.currentTime = 0.0; + self.latency = .011609977; // 512 samples / ( 44100 samples / sec ) default + + + // We're going to impose a format upon the input file + // Single-channel float does the trick. + _outputFormat.mSampleRate = self.samplingRate; + _outputFormat.mFormatID = kAudioFormatLinearPCM; + _outputFormat.mFormatFlags = kAudioFormatFlagIsFloat; + _outputFormat.mBytesPerPacket = 4*self.numChannels; + _outputFormat.mFramesPerPacket = 1; + _outputFormat.mBytesPerFrame = 4*self.numChannels; + _outputFormat.mChannelsPerFrame = self.numChannels; + _outputFormat.mBitsPerChannel = 32; + + // Apply the format to our file + ExtAudioFileSetProperty(_outputFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &_outputFormat); + + + // Arbitrary buffer sizes that don't matter so much as long as they're "big enough" + self.outputBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); + self.holdingBuffer = (float *)calloc(2*self.samplingRate, sizeof(float)); + + pthread_mutex_init(&outputAudioFileLock, NULL); + + // mutex here // + if( 0 == pthread_mutex_trylock( &outputAudioFileLock ) ) + { + CheckError( ExtAudioFileWriteAsync(self.outputFile, 0, NULL), "Initializing audio file"); + } + pthread_mutex_unlock( &outputAudioFileLock ); + + } + return 
self; +} + +- (void)writeNewAudio:(float *)newData numFrames:(UInt32)thisNumFrames numChannels:(UInt32)thisNumChannels +{ + UInt32 numIncomingBytes = thisNumFrames*thisNumChannels*sizeof(float); + memcpy(self.outputBuffer, newData, numIncomingBytes); + + AudioBufferList outgoingAudio; + outgoingAudio.mNumberBuffers = 1; + outgoingAudio.mBuffers[0].mNumberChannels = thisNumChannels; + outgoingAudio.mBuffers[0].mDataByteSize = numIncomingBytes; + outgoingAudio.mBuffers[0].mData = self.outputBuffer; + + if( 0 == pthread_mutex_trylock( &outputAudioFileLock ) ) + { + ExtAudioFileWriteAsync(self.outputFile, thisNumFrames, &outgoingAudio); + } + pthread_mutex_unlock( &outputAudioFileLock ); + + // Figure out where we are in the file + SInt64 frameOffset = 0; + ExtAudioFileTell(self.outputFile, &frameOffset); + self.currentTime = (float)frameOffset / self.samplingRate; + +} + + +- (float)getDuration +{ + // We're going to directly calculate the duration of the audio file (in seconds) + SInt64 framesInThisFile; + UInt32 propertySize = sizeof(framesInThisFile); + ExtAudioFileGetProperty(self.outputFile, kExtAudioFileProperty_FileLengthFrames, &propertySize, &framesInThisFile); + + AudioStreamBasicDescription fileStreamFormat; + propertySize = sizeof(AudioStreamBasicDescription); + ExtAudioFileGetProperty(self.outputFile, kExtAudioFileProperty_FileDataFormat, &propertySize, &fileStreamFormat); + + return (float)framesInThisFile/(float)fileStreamFormat.mSampleRate; + +} + + + +- (void)configureWriterCallback +{ + + if (!self.callbackTimer) + { + self.callbackTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); + } + + if (self.callbackTimer) + { + UInt32 numSamplesPerCallback = (UInt32)( self.latency * self.samplingRate ); + dispatch_source_set_timer(self.callbackTimer, dispatch_walltime(NULL, 0), self.latency*NSEC_PER_SEC, 0); + dispatch_source_set_event_handler(self.callbackTimer, ^{ + + + if (self.writerBlock) { + // Call out with the 
audio that we've got. + self.writerBlock(self.outputBuffer, numSamplesPerCallback, self.numChannels); + + // Get audio from the block supplier + [self writeNewAudio:self.outputBuffer numFrames:numSamplesPerCallback numChannels:self.numChannels]; + + } + + }); + + } + +} + + + +- (void)record; +{ + + // Configure (or if necessary, create and start) the timer for retrieving MP3 audio + [self configureWriterCallback]; + + if (!self.recording) + { + dispatch_resume(self.callbackTimer); + self.recording = TRUE; + } + +} + +- (void)stop +{ + // Close the + pthread_mutex_lock( &outputAudioFileLock ); + ExtAudioFileDispose(self.outputFile); + pthread_mutex_unlock( &outputAudioFileLock ); + self.recording = FALSE; +} + +- (void)pause +{ + // Pause the dispatch timer for retrieving the MP3 audio + if (self.callbackTimer) { + dispatch_suspend(self.callbackTimer); + self.recording = FALSE; + } +} + + + +@end + diff --git a/Novocaine/Novocaine.h b/Novocaine/Novocaine.h new file mode 100755 index 0000000..b2465ab --- /dev/null +++ b/Novocaine/Novocaine.h @@ -0,0 +1,184 @@ +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+
+#import <Foundation/Foundation.h>
+#import <AudioToolbox/AudioToolbox.h>
+#import <Accelerate/Accelerate.h>
+
+#if defined __MAC_OS_X_VERSION_MAX_ALLOWED
+    #define USING_OSX
+    #include <CoreAudio/CoreAudio.h>
+#else
+    #define USING_IOS
+#endif
+
+#include <Block.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static void CheckError(OSStatus error, const char *operation)
+{
+    if (error == noErr) return;
+
+    char str[20];
+    // see if it appears to be a 4-char-code
+    *(UInt32 *)(str + 1) = CFSwapInt32HostToBig(error);
+    if (isprint(str[1]) && isprint(str[2]) && isprint(str[3]) && isprint(str[4])) {
+        str[0] = str[5] = '\'';
+        str[6] = '\0';
+    } else
+        // no, format it as an integer
+        sprintf(str, "%d", (int)error);
+
+    fprintf(stderr, "Error: %s (%s)\n", operation, str);
+
+    exit(1);
+}
+
+
+OSStatus inputCallback (void *inRefCon,
+                        AudioUnitRenderActionFlags * ioActionFlags,
+                        const AudioTimeStamp * inTimeStamp,
+                        UInt32 inOutputBusNumber,
+                        UInt32 inNumberFrames,
+                        AudioBufferList * ioData);
+
+OSStatus renderCallback (void *inRefCon,
+                         AudioUnitRenderActionFlags * ioActionFlags,
+                         const AudioTimeStamp * inTimeStamp,
+                         UInt32 inOutputBusNumber,
+                         UInt32 inNumberFrames,
+                         AudioBufferList * ioData);
+
+
+#if defined (USING_IOS)
+void sessionPropertyListener(void * inClientData,
+                             AudioSessionPropertyID inID,
+                             UInt32 inDataSize,
+                             const void * inData);
+
+#endif
+
+
+void sessionInterruptionListener(void *inClientData, UInt32 inInterruption);
+
+#ifdef __cplusplus
+}
+#endif
+
+typedef void (^OutputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
+typedef void (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
+
+#if defined (USING_IOS)
+@interface Novocaine : NSObject
+#elif defined (USING_OSX)
+@interface Novocaine : NSObject
+#endif
+{
+    // Audio Handling
+    AudioUnit inputUnit;
+ AudioUnit outputUnit; + AudioBufferList *inputBuffer; + + // Session Properties + BOOL inputAvailable; + NSString *inputRoute; + UInt32 numInputChannels; + UInt32 numOutputChannels; + Float64 samplingRate; + BOOL isInterleaved; + UInt32 numBytesPerSample; + AudioStreamBasicDescription inputFormat; + AudioStreamBasicDescription outputFormat; + + // Audio Processing + OutputBlock outputBlock; + InputBlock inputBlock; + + float *inData; + float *outData; + + BOOL playing; + // BOOL playThroughEnabled; + + +#if defined (USING_OSX) + AudioDeviceID *deviceIDs; + NSMutableArray *deviceNames; + AudioDeviceID defaultInputDeviceID; + NSString *defaultDeviceName; +#endif + +} + +@property AudioUnit inputUnit; +@property AudioUnit outputUnit; +@property AudioBufferList *inputBuffer; +@property (nonatomic, retain) OutputBlock outputBlock; +@property (nonatomic, retain) InputBlock inputBlock; +@property BOOL inputAvailable; +@property (nonatomic, retain) NSString *inputRoute; +@property UInt32 numInputChannels; +@property UInt32 numOutputChannels; +@property Float64 samplingRate; +@property BOOL isInterleaved; +@property UInt32 numBytesPerSample; +@property AudioStreamBasicDescription inputFormat; +@property AudioStreamBasicDescription outputFormat; + +// @property BOOL playThroughEnabled; +@property BOOL playing; +@property float *inData; +@property float *outData; + +#if defined (USING_OSX) +@property AudioDeviceID *deviceIDs; +@property (nonatomic, retain) NSMutableArray *deviceNames; +@property AudioDeviceID defaultInputDeviceID; +@property (nonatomic, retain) NSString *defaultInputDeviceName; +@property AudioDeviceID defaultOutputDeviceID; +@property (nonatomic, retain) NSString *defaultOutputDeviceName; +- (void)enumerateAudioDevices; +#endif + + +// Singleton methods ++ (Novocaine *) audioManager; + + +// Audio Unit methods +- (void)play; +- (void)pause; +- (void)setupAudio; +- (void)ifAudioInputIsAvailableThenSetupAudioSession; + +#if defined ( USING_IOS ) +- 
(void)checkSessionProperties; +- (void)checkAudioSource; +#endif + + +@end diff --git a/Novocaine/Novocaine.m b/Novocaine/Novocaine.m new file mode 100755 index 0000000..e6b6e41 --- /dev/null +++ b/Novocaine/Novocaine.m @@ -0,0 +1,902 @@ +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. +// +// TODO: +// Switching mic and speaker on/off +// +// HOUSEKEEPING AND NICE FEATURES: +// Disambiguate outputFormat (the AUHAL's stream format) +// More nuanced input detection on the Mac +// Route switching should work, check with iPhone +// Device switching should work, check with laptop. Read that damn book. +// Wrap logging with debug macros. +// Think about what should be public, what private. +// Ability to select non-default devices. 
+ + +#import "Novocaine.h" +#define kInputBus 1 +#define kOutputBus 0 +#define kDefaultDevice 999999 + +#import "TargetConditionals.h" + +static Novocaine *audioManager = nil; + +@interface Novocaine() +- (void)setupAudio; + +- (NSString *)applicationDocumentsDirectory; + +@end + + +@implementation Novocaine +@synthesize inputUnit; +@synthesize outputUnit; +@synthesize inputBuffer; +@synthesize inputRoute, inputAvailable; +@synthesize numInputChannels, numOutputChannels; +@synthesize inputBlock, outputBlock; +@synthesize samplingRate; +@synthesize isInterleaved; +@synthesize numBytesPerSample; +@synthesize inData; +@synthesize outData; +@synthesize playing; + +@synthesize outputFormat; +@synthesize inputFormat; +// @synthesize playThroughEnabled; + +#if defined( USING_OSX ) +@synthesize deviceIDs; +@synthesize deviceNames; +@synthesize defaultInputDeviceID; +@synthesize defaultInputDeviceName; +@synthesize defaultOutputDeviceID; +@synthesize defaultOutputDeviceName; +#endif + +#pragma mark - Singleton Methods ++ (Novocaine *) audioManager +{ + @synchronized(self) + { + if (audioManager == nil) { + audioManager = [[Novocaine alloc] init]; + } + } + return audioManager; +} + ++ (id)allocWithZone:(NSZone *)zone { + @synchronized(self) { + if (audioManager == nil) { + audioManager = [super allocWithZone:zone]; + return audioManager; // assignment and return on first allocation + } + } + return nil; // on subsequent allocation attempts return nil +} + +- (id)copyWithZone:(NSZone *)zone +{ + return self; +} + +- (id)retain { + return self; +} + +- (unsigned)retainCount { + return UINT_MAX; // denotes an object that cannot be released +} + +- (oneway void)release { + //do nothing +} + +- (id)init +{ + if (self = [super init]) + { + + // Initialize some stuff k? 
+ outputBlock = nil; + inputBlock = nil; + + // Initialize a float buffer to hold audio + self.inData = (float *)calloc(8192, sizeof(float)); // probably more than we'll need + self.outData = (float *)calloc(8192, sizeof(float)); + + self.inputBlock = nil; + self.outputBlock = nil; + +#if defined ( USING_OSX ) + self.deviceNames = [[NSMutableArray alloc] initWithCapacity:100]; // more than we'll need +#endif + + self.playing = NO; + // self.playThroughEnabled = NO; + + // Fire up the audio session ( with steady error checking ... ) + [self ifAudioInputIsAvailableThenSetupAudioSession]; + + return self; + + } + + return nil; +} + + +#pragma mark - Block Handling +- (void)setInputBlock:(InputBlock)newInputBlock +{ + InputBlock tmpBlock = inputBlock; + inputBlock = Block_copy(newInputBlock); + Block_release(tmpBlock); +} + +- (void)setOutputBlock:(OutputBlock)newOutputBlock +{ + OutputBlock tmpBlock = outputBlock; + outputBlock = Block_copy(newOutputBlock); + Block_release(tmpBlock); +} + + + +#pragma mark - Audio Methods + + +- (void)ifAudioInputIsAvailableThenSetupAudioSession { + // Initialize and configure the audio session, and add an interuption listener + +#if defined ( USING_IOS ) + CheckError( AudioSessionInitialize(NULL, NULL, sessionInterruptionListener, self), "Couldn't initialize audio session"); + [self checkAudioSource]; +#elif defined ( USING_OSX ) + // TODO: grab the audio device + [self enumerateAudioDevices]; + self.inputAvailable = YES; +#endif + + // Check the session properties (available input routes, number of channels, etc) + + + + // If we do have input, then let's rock 'n roll. + if (self.inputAvailable) { + [self setupAudio]; + [self play]; + } + + // If we don't have input, then ask the user to provide some + else + { +#if defined ( USING_IOS ) + UIAlertView *noInputAlert = + [[UIAlertView alloc] initWithTitle:@"No Audio Input" + message:@"Couldn't find any audio input. Plug in your Apple headphones or another microphone." 
+ delegate:self + cancelButtonTitle:@"OK" + otherButtonTitles:nil]; + + [noInputAlert show]; + [noInputAlert release]; +#endif + + } +} + + +- (void)setupAudio +{ + + + // --- Audio Session Setup --- + // --------------------------- + +#if defined ( USING_IOS ) + + UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord; + CheckError( AudioSessionSetProperty (kAudioSessionProperty_AudioCategory, + sizeof (sessionCategory), + &sessionCategory), "Couldn't set audio category"); + + + // Add a property listener, to listen to changes to the session + CheckError( AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange, sessionPropertyListener, self), "Couldn't add audio session property listener"); + + // Change the session propert for output route (use the speaker!) + UInt32 doChangeDefaultRoute = 1; + AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, sizeof (doChangeDefaultRoute), &doChangeDefaultRoute); + + // Set the buffer size, this will affect the number of samples that get rendered every time the audio callback is fired + // A small number will get you lower latency audio, but will make your processor work harder +#if !TARGET_IPHONE_SIMULATOR + Float32 preferredBufferSize = 0.0232; + CheckError( AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize), "Couldn't set the preferred buffer duration"); +#endif + + + // Set the audio session active + CheckError( AudioSessionSetActive(YES), "Couldn't activate the audio session"); + + [self checkSessionProperties]; + +#elif defined ( USING_OSX ) + + + +#endif + + + + // ----- Audio Unit Setup ----- + // ---------------------------- + + + // Describe the output unit. 
+ +#if defined ( USING_OSX ) + AudioComponentDescription inputDescription = {0}; + inputDescription.componentType = kAudioUnitType_Output; + inputDescription.componentSubType = kAudioUnitSubType_HALOutput; + inputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + + AudioComponentDescription outputDescription = {0}; + outputDescription.componentType = kAudioUnitType_Output; + outputDescription.componentSubType = kAudioUnitSubType_HALOutput; + outputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + +#elif defined (USING_IOS) + AudioComponentDescription inputDescription = {0}; + inputDescription.componentType = kAudioUnitType_Output; + inputDescription.componentSubType = kAudioUnitSubType_RemoteIO; + inputDescription.componentManufacturer = kAudioUnitManufacturer_Apple; + +#endif + + + + // Get component + AudioComponent inputComponent = AudioComponentFindNext(NULL, &inputDescription); + CheckError( AudioComponentInstanceNew(inputComponent, &inputUnit), "Couldn't create the output audio unit"); + +#if defined ( USING_OSX ) + AudioComponent outputComponent = AudioComponentFindNext(NULL, &outputDescription); + CheckError( AudioComponentInstanceNew(outputComponent, &outputUnit), "Couldn't create the output audio unit"); +#endif + + + // Enable input + UInt32 one = 1; + CheckError( AudioUnitSetProperty(inputUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Input, + kInputBus, + &one, + sizeof(one)), "Couldn't enable IO on the input scope of output unit"); + +#if defined ( USING_OSX ) + // Disable output on the input unit + // (only on Mac, since on the iPhone, the input unit is also the output unit) + UInt32 zero = 0; + CheckError( AudioUnitSetProperty(inputUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Output, + kOutputBus, + &zero, + sizeof(UInt32)), "Couldn't disable output on the audio unit"); + + // Enable output + CheckError( AudioUnitSetProperty(outputUnit, + kAudioOutputUnitProperty_EnableIO, + 
kAudioUnitScope_Output, + kOutputBus, + &one, + sizeof(one)), "Couldn't enable IO on the input scope of output unit"); + + // Disable input + CheckError( AudioUnitSetProperty(outputUnit, + kAudioOutputUnitProperty_EnableIO, + kAudioUnitScope_Input, + kInputBus, + &zero, + sizeof(UInt32)), "Couldn't disable output on the audio unit"); + +#endif + + // TODO: first query the hardware for desired stream descriptions + // Check the input stream format + +# if defined ( USING_IOS ) + UInt32 size; + size = sizeof( AudioStreamBasicDescription ); + CheckError( AudioUnitGetProperty( inputUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, + 1, + &inputFormat, + &size ), + "Couldn't get the hardware input stream format"); + + // Check the output stream format + size = sizeof( AudioStreamBasicDescription ); + CheckError( AudioUnitGetProperty( inputUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + 1, + &outputFormat, + &size ), + "Couldn't get the hardware output stream format"); + + // TODO: check this works on iOS! 
+ inputFormat.mSampleRate = 44100.0; + outputFormat.mSampleRate = 44100.0; + self.samplingRate = inputFormat.mSampleRate; + self.numBytesPerSample = inputFormat.mBitsPerChannel / 8; + + size = sizeof(AudioStreamBasicDescription); + CheckError(AudioUnitSetProperty(inputUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + kInputBus, + &outputFormat, + size), + "Couldn't set the ASBD on the audio unit (after setting its sampling rate)"); + + +# elif defined ( USING_OSX ) + + UInt32 size = sizeof(AudioDeviceID); + if(self.defaultInputDeviceID == kAudioDeviceUnknown) + { + AudioDeviceID thisDeviceID; + UInt32 propsize = sizeof(AudioDeviceID); + CheckError(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propsize, &thisDeviceID), "Could not get the default device"); + self.defaultInputDeviceID = thisDeviceID; + } + + if (self.defaultOutputDeviceID == kAudioDeviceUnknown) + { + AudioDeviceID thisDeviceID; + UInt32 propsize = sizeof(AudioDeviceID); + CheckError(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultOutputDevice, &propsize, &thisDeviceID), "Could not get the default device"); + self.defaultOutputDeviceID = thisDeviceID; + + } + + + // Set the current device to the default input unit. 
+ CheckError( AudioUnitSetProperty( inputUnit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + kOutputBus, + &defaultInputDeviceID, + sizeof(AudioDeviceID) ), "Couldn't set the current input audio device"); + + CheckError( AudioUnitSetProperty( outputUnit, + kAudioOutputUnitProperty_CurrentDevice, + kAudioUnitScope_Global, + kOutputBus, + &defaultOutputDeviceID, + sizeof(AudioDeviceID) ), "Couldn't set the current output audio device"); + + + UInt32 propertySize = sizeof(AudioStreamBasicDescription); + CheckError(AudioUnitGetProperty(inputUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + kInputBus, + &outputFormat, + &propertySize), + "Couldn't get ASBD from input unit"); + + + // 9/6/10 - check the input device's stream format + CheckError(AudioUnitGetProperty(inputUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Input, + kInputBus, + &inputFormat, + &propertySize), + "Couldn't get ASBD from input unit"); + + + outputFormat.mSampleRate = inputFormat.mSampleRate; +// outputFormat.mFormatFlags = kAudioFormatFlagsCanonical; + self.samplingRate = inputFormat.mSampleRate; + self.numBytesPerSample = inputFormat.mBitsPerChannel / 8; + + self.numInputChannels = inputFormat.mChannelsPerFrame; + self.numOutputChannels = outputFormat.mChannelsPerFrame; + + propertySize = sizeof(AudioStreamBasicDescription); + CheckError(AudioUnitSetProperty(inputUnit, + kAudioUnitProperty_StreamFormat, + kAudioUnitScope_Output, + kInputBus, + &outputFormat, + propertySize), + "Couldn't set the ASBD on the audio unit (after setting its sampling rate)"); + + +#endif + + + +#if defined ( USING_IOS ) + UInt32 numFramesPerBuffer; + size = sizeof(UInt32); + CheckError(AudioUnitGetProperty(inputUnit, + kAudioUnitProperty_MaximumFramesPerSlice, + kAudioUnitScope_Global, + kOutputBus, + &numFramesPerBuffer, + &size), + "Couldn't get the number of frames per callback"); + + UInt32 bufferSizeBytes = outputFormat.mBytesPerFrame * 
outputFormat.mFramesPerPacket * numFramesPerBuffer; + +#elif defined ( USING_OSX ) + // Get the size of the IO buffer(s) + UInt32 bufferSizeFrames = 0; + size = sizeof(UInt32); + CheckError (AudioUnitGetProperty(self.inputUnit, + kAudioDevicePropertyBufferFrameSize, + kAudioUnitScope_Global, + 0, + &bufferSizeFrames, + &size), + "Couldn't get buffer frame size from input unit"); + UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32); +#endif + + + + if (outputFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) { + // The audio is non-interleaved + printf("Not interleaved!\n"); + self.isInterleaved = NO; + + // allocate an AudioBufferList plus enough space for array of AudioBuffers + UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * outputFormat.mChannelsPerFrame); + + //malloc buffer lists + self.inputBuffer = (AudioBufferList *)malloc(propsize); + self.inputBuffer->mNumberBuffers = outputFormat.mChannelsPerFrame; + + //pre-malloc buffers for AudioBufferLists + for(UInt32 i =0; i< self.inputBuffer->mNumberBuffers ; i++) { + self.inputBuffer->mBuffers[i].mNumberChannels = 1; + self.inputBuffer->mBuffers[i].mDataByteSize = bufferSizeBytes; + self.inputBuffer->mBuffers[i].mData = malloc(bufferSizeBytes); + memset(self.inputBuffer->mBuffers[i].mData, 0, bufferSizeBytes); + } + + } else { + printf ("Format is interleaved\n"); + self.isInterleaved = YES; + + // allocate an AudioBufferList plus enough space for array of AudioBuffers + UInt32 propsize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * 1); + + //malloc buffer lists + self.inputBuffer = (AudioBufferList *)malloc(propsize); + self.inputBuffer->mNumberBuffers = 1; + + //pre-malloc buffers for AudioBufferLists + self.inputBuffer->mBuffers[0].mNumberChannels = outputFormat.mChannelsPerFrame; + self.inputBuffer->mBuffers[0].mDataByteSize = bufferSizeBytes; + self.inputBuffer->mBuffers[0].mData = malloc(bufferSizeBytes); + 
memset(self.inputBuffer->mBuffers[0].mData, 0, bufferSizeBytes); + + } + + + // Slap a render callback on the unit + AURenderCallbackStruct callbackStruct; + callbackStruct.inputProc = inputCallback; + callbackStruct.inputProcRefCon = self; + + CheckError( AudioUnitSetProperty(inputUnit, + kAudioOutputUnitProperty_SetInputCallback, + kAudioUnitScope_Global, + 0, + &callbackStruct, + sizeof(callbackStruct)), "Couldn't set the callback on the input unit"); + + + callbackStruct.inputProc = renderCallback; + callbackStruct.inputProcRefCon = self; +# if defined ( USING_OSX ) + CheckError( AudioUnitSetProperty(outputUnit, + kAudioUnitProperty_SetRenderCallback, + kAudioUnitScope_Input, + 0, + &callbackStruct, + sizeof(callbackStruct)), + "Couldn't set the render callback on the input unit"); + +#elif defined ( USING_IOS ) + CheckError( AudioUnitSetProperty(inputUnit, + kAudioUnitProperty_SetRenderCallback, + kAudioUnitScope_Input, + 0, + &callbackStruct, + sizeof(callbackStruct)), + "Couldn't set the render callback on the input unit"); +#endif + + + + + CheckError(AudioUnitInitialize(inputUnit), "Couldn't initialize the output unit"); +#if defined ( USING_OSX ) + CheckError(AudioUnitInitialize(outputUnit), "Couldn't initialize the output unit"); +#endif + + + +} + +#if defined (USING_OSX) +- (void)enumerateAudioDevices +{ + UInt32 propSize; + + UInt32 propsize = sizeof(AudioDeviceID); + CheckError(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propsize, &defaultInputDeviceID), "Could not get the default device"); + + AudioHardwareGetPropertyInfo( kAudioHardwarePropertyDevices, &propSize, NULL ); + uint32_t deviceCount = ( propSize / sizeof(AudioDeviceID) ); + + // Allocate the device IDs + self.deviceIDs = (AudioDeviceID *)calloc(deviceCount, sizeof(AudioDeviceID)); + [deviceNames removeAllObjects]; + + // Get all the device IDs + CheckError( AudioHardwareGetProperty( kAudioHardwarePropertyDevices, &propSize, self.deviceIDs ), "Could not get device 
IDs"); + + // Get the names of all the device IDs + for( int i = 0; i < deviceCount; i++ ) + { + UInt32 size = sizeof(AudioDeviceID); + CheckError( AudioDeviceGetPropertyInfo( self.deviceIDs[i], 0, true, kAudioDevicePropertyDeviceName, &size, NULL ), "Could not get device name length"); + + char cStringOfDeviceName[size]; + CheckError( AudioDeviceGetProperty( self.deviceIDs[i], 0, true, kAudioDevicePropertyDeviceName, &size, cStringOfDeviceName ), "Could not get device name"); + NSString *thisDeviceName = [NSString stringWithCString:cStringOfDeviceName encoding:NSUTF8StringEncoding]; + + NSLog(@"Device: %@, ID: %d", thisDeviceName, self.deviceIDs[i]); + [deviceNames addObject:thisDeviceName]; + } + +} + +#endif + + + +- (void)pause { + + if (playing) { + CheckError( AudioOutputUnitStop(inputUnit), "Couldn't stop the output unit"); +#if defined ( USING_OSX ) + CheckError( AudioOutputUnitStop(outputUnit), "Couldn't stop the output unit"); +#endif + playing = NO; + } + +} + +- (void)play { + + UInt32 isInputAvailable=0; + UInt32 size = sizeof(isInputAvailable); + +#if defined ( USING_IOS ) + CheckError( AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable, + &size, + &isInputAvailable), "Couldn't check if input was available"); + +#elif defined ( USING_OSX ) + isInputAvailable = 1; + +#endif + + + self.inputAvailable = isInputAvailable; + + if ( self.inputAvailable ) { + // Set the audio session category for simultaneous play and record + if (!playing) { + CheckError( AudioOutputUnitStart(inputUnit), "Couldn't start the output unit"); +#if defined ( USING_OSX ) + CheckError( AudioOutputUnitStart(outputUnit), "Couldn't start the output unit"); +#endif + + self.playing = YES; + + } + } + +} + + +#pragma mark - Render Methods +OSStatus inputCallback (void *inRefCon, + AudioUnitRenderActionFlags * ioActionFlags, + const AudioTimeStamp * inTimeStamp, + UInt32 inOutputBusNumber, + UInt32 inNumberFrames, + AudioBufferList * ioData) +{ + + + Novocaine *sm = 
(Novocaine *)inRefCon; + + if (!sm.playing) + return noErr; + if (sm.inputBlock == nil) + return noErr; + + + // Check the current number of channels + // Let's actually grab the audio + CheckError( AudioUnitRender(sm.inputUnit, ioActionFlags, inTimeStamp, inOutputBusNumber, inNumberFrames, sm.inputBuffer), "Couldn't render the output unit"); + + + // Convert the audio in something manageable + // For Float32s ... + if ( sm.numBytesPerSample == 4 ) // then we've already got flaots + { + + float zero = 0.0f; + if ( ! sm.isInterleaved ) { // if the data is in separate buffers, make it interleaved + for (int i=0; i < sm.numInputChannels; ++i) { + vDSP_vsadd((float *)sm.inputBuffer->mBuffers[i].mData, 1, &zero, sm.inData+i, + sm.numInputChannels, inNumberFrames); + } + } + else { // if the data is already interleaved, copy it all in one happy block. + // TODO: check mDataByteSize is proper + memcpy(sm.inData, (float *)sm.inputBuffer->mBuffers[0].mData, sm.inputBuffer->mBuffers[0].mDataByteSize); + } + } + + // For SInt16s ... + else if ( sm.numBytesPerSample == 2 ) // then we're dealing with SInt16's + { + if ( ! sm.isInterleaved ) { + for (int i=0; i < sm.numInputChannels; ++i) { + vDSP_vflt16((SInt16 *)sm.inputBuffer->mBuffers[i].mData, 1, sm.inData+i, sm.numInputChannels, inNumberFrames); + } + } + else { + vDSP_vflt16((SInt16 *)sm.inputBuffer->mBuffers[0].mData, 1, sm.inData, 1, inNumberFrames*sm.numInputChannels); + } + + float scale = 1.0 / (float)INT16_MAX; + vDSP_vsmul(sm.inData, 1, &scale, sm.inData, 1, inNumberFrames*sm.numInputChannels); + } + + // Now do the processing! 
+ sm.inputBlock(sm.inData, inNumberFrames, sm.numInputChannels); + + return noErr; + + +} + +OSStatus renderCallback (void *inRefCon, + AudioUnitRenderActionFlags * ioActionFlags, + const AudioTimeStamp * inTimeStamp, + UInt32 inOutputBusNumber, + UInt32 inNumberFrames, + AudioBufferList * ioData) +{ + + + Novocaine *sm = (Novocaine *)inRefCon; + float zero = 0.0; + + + for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) { + memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize); + } + + if (!sm.playing) + return noErr; + if (!sm.outputBlock) + return noErr; + + + // Collect data to render from the callbacks + sm.outputBlock(sm.outData, inNumberFrames, sm.numOutputChannels); + + + // Put the rendered data into the output buffer + // TODO: convert SInt16 ranges to float ranges. + if ( sm.numBytesPerSample == 4 ) // then we've already got floats + { + + for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) { + + int thisNumChannels = ioData->mBuffers[iBuffer].mNumberChannels; + + for (int iChannel = 0; iChannel < thisNumChannels; ++iChannel) { + vDSP_vsadd(sm.outData+iChannel, sm.numOutputChannels, &zero, (float *)ioData->mBuffers[iBuffer].mData, thisNumChannels, inNumberFrames); + } + } + } + else if ( sm.numBytesPerSample == 2 ) // then we need to convert SInt16 -> Float (and also scale) + { + float scale = (float)INT16_MAX; + vDSP_vsmul(sm.outData, 1, &scale, sm.outData, 1, inNumberFrames*sm.numOutputChannels); + + for (int iBuffer=0; iBuffer < ioData->mNumberBuffers; ++iBuffer) { + + int thisNumChannels = ioData->mBuffers[iBuffer].mNumberChannels; + + for (int iChannel = 0; iChannel < thisNumChannels; ++iChannel) { + vDSP_vfix16(sm.outData+iChannel, sm.numOutputChannels, (SInt16 *)ioData->mBuffers[iBuffer].mData+iChannel, thisNumChannels, inNumberFrames); + } + } + + } + + return noErr; + +} + +#pragma mark - Audio Session Listeners +#if defined (USING_IOS) +void sessionPropertyListener(void * inClientData, + 
AudioSessionPropertyID inID, + UInt32 inDataSize, + const void * inData){ + + + if (inID == kAudioSessionProperty_AudioRouteChange) + { + Novocaine *sm = (Novocaine *)inClientData; + [sm checkSessionProperties]; + } + +} + +- (void)checkAudioSource { + // Check what the incoming audio route is. + UInt32 propertySize = sizeof(CFStringRef); + CFStringRef route; + CheckError( AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &route), "Couldn't check the audio route"); + self.inputRoute = (NSString *)route; + CFRelease(route); + NSLog(@"AudioRoute: %@", self.inputRoute); + + + // Check if there's input available. + // TODO: check if checking for available input is redundant. + // Possibly there's a different property ID change? + UInt32 isInputAvailable = 0; + UInt32 size = sizeof(isInputAvailable); + CheckError( AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable, + &size, + &isInputAvailable), "Couldn't check if input is available"); + self.inputAvailable = (BOOL)isInputAvailable; + NSLog(@"Input available? %d", self.inputAvailable); + +} + + +// To be run ONCE per session property change and once on initialization. +- (void)checkSessionProperties +{ + + // Check if there is input, and from where + [self checkAudioSource]; + + // Check the number of input channels. + // Find the number of channels + UInt32 size = sizeof(self.numInputChannels); + UInt32 newNumChannels; + CheckError( AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels, &size, &newNumChannels), "Checking number of input channels"); + self.numInputChannels = newNumChannels; + // self.numInputChannels = 1; + NSLog(@"We've got %lu input channels", self.numInputChannels); + + + // Check the number of input channels. 
+ // Find the number of channels + CheckError( AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareOutputNumberChannels, &size, &newNumChannels), "Checking number of output channels"); + self.numOutputChannels = newNumChannels; + // self.numOutputChannels = 1; + NSLog(@"We've got %lu output channels", self.numOutputChannels); + + + // Get the hardware sampling rate. This is settable, but here we're only reading. + Float64 currentSamplingRate; + size = sizeof(currentSamplingRate); + CheckError( AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, ¤tSamplingRate), "Checking hardware sampling rate"); + self.samplingRate = currentSamplingRate; + NSLog(@"Current sampling rate: %f", self.samplingRate); + +} + +void sessionInterruptionListener(void *inClientData, UInt32 inInterruption) { + + Novocaine *sm = (Novocaine *)inClientData; + + if (inInterruption == kAudioSessionBeginInterruption) { + NSLog(@"Begin interuption"); + sm.inputAvailable = NO; + } + else if (inInterruption == kAudioSessionEndInterruption) { + NSLog(@"End interuption"); + sm.inputAvailable = YES; + [sm play]; + } + +} + +#endif + + + + +#if defined ( USING_OSX ) + +// Checks the number of channels and sampling rate of the connected device. 
+- (void)checkDeviceProperties +{ + +} + +- (void)selectAudioDevice:(AudioDeviceID)deviceID +{ + +} + +#endif + + +#pragma mark - Convenience Methods +- (NSString *)applicationDocumentsDirectory { + return [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject]; +} + + +@end + + + + + + + + diff --git a/Novocaine/RingBuffer.h b/Novocaine/RingBuffer.h new file mode 100755 index 0000000..1d5fce5 --- /dev/null +++ b/Novocaine/RingBuffer.h @@ -0,0 +1,73 @@ +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. + +// TODO: Error throwing if things go wrong. 
+#import +#import + +#define kMaxNumChannels 4 + +class RingBuffer { + +public: + RingBuffer() {}; + RingBuffer(SInt64 bufferLength, SInt64 numChannels); + ~RingBuffer(); + + void AddNewSInt16AudioBuffer(const AudioBuffer aBuffer); + void AddNewSInt16Data(const SInt16 *newData, const SInt64 numFrames, const SInt64 whichChannel); + void AddNewFloatData(const float *newData, const SInt64 numFrames, const SInt64 whichChannel = 0); + void AddNewDoubleData(const double *newData, const SInt64 numFrames, const SInt64 whichChannel = 0); + void AddNewInterleavedFloatData(const float *newData, const SInt64 numFrames, const SInt64 numChannelsHere); + void FetchInterleavedData(float *outData, SInt64 numFrames, SInt64 numChannels); + void FetchFreshData(float *outData, SInt64 numFrames, SInt64 whichChannel, SInt64 stride); + void FetchFreshData2(float *outData, SInt64 numFrames, SInt64 whichChannel, SInt64 stride); + + void FetchData(float *outData, SInt64 numFrames, SInt64 whichChannel, SInt64 stride); + SInt64 NumNewFrames(SInt64 lastReadFrame, int iChannel = 0); + SInt64 NumUnreadFrames(int iChannel = 0) {return mNumUnreadFrames[iChannel]; } + + SInt64 WriteHeadPosition(int aChannel = 0) { return mLastWrittenIndex[aChannel]; } + SInt64 ReadHeadPosition(int aChannel = 0) { return mLastReadIndex[aChannel]; } + + void SeekWriteHeadPosition(SInt64 offset, int iChannel=0); + void SeekReadHeadPosition(SInt64 offset, int iChannel=0); + SInt64 NumChannels() {return mNumChannels; } + + void Clear(); + + // Analytics + float Mean(const SInt64 whichChannel = 0); + float Max(const SInt64 whichChannel = 0); + float Min(const SInt64 whichChannel = 0); + +private: + SInt64 mLastWrittenIndex[kMaxNumChannels]; + SInt64 mLastReadIndex[kMaxNumChannels]; + SInt64 mNumUnreadFrames[kMaxNumChannels]; + SInt64 mSizeOfBuffer; + SInt64 mNumChannels; + float **mData; + bool mAllocated; + +}; \ No newline at end of file diff --git a/Novocaine/RingBuffer.mm b/Novocaine/RingBuffer.mm new file mode 100755 
index 0000000..753d6d6 --- /dev/null +++ b/Novocaine/RingBuffer.mm @@ -0,0 +1,296 @@ +// Copyright (c) 2012 Alex Wiltschko +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. 
+ + +#include "RingBuffer.h" + + +RingBuffer::RingBuffer(SInt64 bufferLength, SInt64 numChannels) : +mSizeOfBuffer(bufferLength) +{ + + if (numChannels > kMaxNumChannels) + mNumChannels = kMaxNumChannels; + else if (numChannels <= 0) + mNumChannels = 1; + else + mNumChannels = numChannels; + + mData = (float **)calloc(numChannels, sizeof(float *)); + for (int i=0; i < numChannels; ++i) { + mData[i] = (float *)calloc(bufferLength, sizeof(float)); + mLastWrittenIndex[i] = 0; + mLastReadIndex[i] = 0; + mNumUnreadFrames[i] = 0; + } + +} + +RingBuffer::~RingBuffer() +{ + for (int i=0; i= mSizeOfBuffer) mNumUnreadFrames[iChannel] = mSizeOfBuffer; + } + + +} + +void RingBuffer::AddNewSInt16Data(const SInt16 *newData, const SInt64 numFrames, const SInt64 whichChannel) +{ + SInt64 idx; + for (int i=0; i < numFrames; ++i) { + idx = (i + mLastWrittenIndex[whichChannel]) % (mSizeOfBuffer); + mData[whichChannel][idx] = (float)newData[i]; + } + + mLastWrittenIndex[whichChannel] = (mLastWrittenIndex[whichChannel] + numFrames) % (mSizeOfBuffer); + mNumUnreadFrames[whichChannel] = mNumUnreadFrames[whichChannel] + numFrames; + if (mNumUnreadFrames[whichChannel] >= mSizeOfBuffer) mNumUnreadFrames[whichChannel] = mSizeOfBuffer; +} + +void RingBuffer::AddNewFloatData(const float *newData, const SInt64 numFrames, const SInt64 whichChannel) +{ + + SInt64 idx; + for (int i=0; i < numFrames; ++i) { + idx = (i + mLastWrittenIndex[whichChannel]) % (mSizeOfBuffer); + mData[whichChannel][idx] = newData[i]; + } + + mLastWrittenIndex[whichChannel] = (mLastWrittenIndex[whichChannel] + numFrames) % (mSizeOfBuffer); + mNumUnreadFrames[whichChannel] = mNumUnreadFrames[whichChannel] + numFrames; + if (mNumUnreadFrames[whichChannel] >= mSizeOfBuffer) mNumUnreadFrames[whichChannel] = mSizeOfBuffer; +} + +void RingBuffer::AddNewDoubleData(const double *newData, const SInt64 numFrames, const SInt64 whichChannel) +{ + + SInt64 idx; + for (int i=0; i < numFrames; ++i) { + idx = (i + 
mLastWrittenIndex[whichChannel]) % (mSizeOfBuffer); + mData[whichChannel][idx] = (float)newData[i]; + } + + mLastWrittenIndex[whichChannel] = (mLastWrittenIndex[whichChannel] + numFrames) % (mSizeOfBuffer); + mNumUnreadFrames[whichChannel] = mNumUnreadFrames[whichChannel] + numFrames; + if (mNumUnreadFrames[whichChannel] >= mSizeOfBuffer) mNumUnreadFrames[whichChannel] = mSizeOfBuffer; +} + +void RingBuffer::AddNewInterleavedFloatData(const float *newData, const SInt64 numFrames, const SInt64 numChannelsHere) +{ + + int numChannelsToCopy = (numChannelsHere <= mNumChannels) ? numChannelsHere : mNumChannels; + float zero = 0.0f; + + for (int iChannel = 0; iChannel < numChannelsToCopy; ++iChannel) { + + if (numFrames + mLastWrittenIndex[iChannel] < mSizeOfBuffer) { // if our new set of samples won't overrun the edge of the buffer + vDSP_vsadd((float *)&newData[iChannel], + numChannelsHere, + &zero, + &mData[iChannel][mLastWrittenIndex[iChannel]], + 1, + numFrames); + } + + else { // if we will overrun, then we need to do two separate copies. 
+ int numSamplesInFirstCopy = mSizeOfBuffer - mLastWrittenIndex[iChannel]; + int numSamplesInSecondCopy = numFrames - numSamplesInFirstCopy; + + vDSP_vsadd((float *)&newData[iChannel], + numChannelsHere, + &zero, + &mData[iChannel][mLastWrittenIndex[iChannel]], + 1, + numSamplesInFirstCopy); + + vDSP_vsadd((float *)&newData[numSamplesInFirstCopy*numChannelsHere + iChannel], + numChannelsHere, + &zero, + &mData[iChannel][0], + 1, + numSamplesInSecondCopy); + } + + mLastWrittenIndex[iChannel] = (mLastWrittenIndex[iChannel] + numFrames) % (mSizeOfBuffer); + mNumUnreadFrames[iChannel] = (mNumUnreadFrames[iChannel] + numFrames); + if (mNumUnreadFrames[iChannel] >= mSizeOfBuffer) mNumUnreadFrames[iChannel] = mSizeOfBuffer; + } + + +} + +void RingBuffer::FetchFreshData2(float *outData, SInt64 numFrames, SInt64 whichChannel, SInt64 stride) +{ + + if (mLastWrittenIndex[whichChannel] - numFrames >= 0) { // if we're requesting samples that won't go off the left end of the ring buffer, then go ahead and copy them all out. + + UInt32 idx = mLastWrittenIndex[whichChannel] - numFrames; + float zero = 0.0f; + vDSP_vsadd(&mData[whichChannel][idx], + 1, + &zero, + outData, + stride, + numFrames); + } + + else { // if we will overrun, then we need to do two separate copies. + + // The copy that bleeds off the left, and cycles back to the right of the ring buffer + int numSamplesInFirstCopy = numFrames - mLastWrittenIndex[whichChannel]; + // The copy that starts at the beginning, and proceeds to the end. 
+ int numSamplesInSecondCopy = mLastWrittenIndex[whichChannel]; + + + float zero = 0.0f; + UInt32 firstIndex = mSizeOfBuffer - numSamplesInFirstCopy; + vDSP_vsadd(&mData[whichChannel][firstIndex], + 1, + &zero, + &outData[0], + stride, + numSamplesInFirstCopy); + + vDSP_vsadd(&mData[whichChannel][0], + 1, + &zero, + &outData[numSamplesInFirstCopy*stride], + stride, + numSamplesInSecondCopy); + + } + +} + +void RingBuffer::FetchData(float *outData, SInt64 numFrames, SInt64 whichChannel, SInt64 stride) +{ + int idx; + for (int i=0; i < numFrames; ++i) { + idx = (mLastReadIndex[whichChannel] + i) % (mSizeOfBuffer); + outData[i*stride] = mData[whichChannel][idx]; + } + + mLastReadIndex[whichChannel] = (mLastReadIndex[whichChannel] + numFrames) % (mSizeOfBuffer); + + mNumUnreadFrames[whichChannel] -= numFrames; + if (mNumUnreadFrames[whichChannel] <= 0) mNumUnreadFrames[whichChannel] = 0; + +} + +void RingBuffer::FetchInterleavedData(float *outData, SInt64 numFrames, SInt64 numChannels) +{ + for (int iChannel=0; iChannel < numChannels; ++iChannel) { + FetchData(&outData[iChannel], numFrames, iChannel, numChannels); + } + +} + +void RingBuffer::FetchFreshData(float *outData, SInt64 numFrames, SInt64 whichChannel, SInt64 stride) +{ + + int idx; + for (int i=0; i < numFrames; ++i) { + idx = (mLastWrittenIndex[whichChannel] - numFrames + i) % (mSizeOfBuffer); + outData[i*stride] = mData[whichChannel][idx]; + } + + mLastReadIndex[whichChannel] = mLastWrittenIndex[whichChannel]; + mNumUnreadFrames[whichChannel] = 0; // Reading at the front of the buffer resets old data +} + +void RingBuffer::SeekWriteHeadPosition(SInt64 offset, int iChannel) +{ + mLastWrittenIndex[iChannel] = (mLastWrittenIndex[iChannel] + offset) % (mSizeOfBuffer); +} + +void RingBuffer::SeekReadHeadPosition(SInt64 offset, int iChannel) +{ + mLastReadIndex[iChannel] = (mLastReadIndex[iChannel] + offset) % (mSizeOfBuffer); +} + + +SInt64 RingBuffer::NumNewFrames(SInt64 lastReadFrame, int iChannel) +{ + int 
numNewFrames = mLastWrittenIndex[iChannel] - lastReadFrame; + if (numNewFrames < 0) numNewFrames += mSizeOfBuffer; + + return (SInt64)numNewFrames; +} + +#pragma mark - Analytics +float RingBuffer::Mean(const SInt64 whichChannel) +{ + float mean; + vDSP_meanv(mData[whichChannel],1,&mean,mSizeOfBuffer); + return mean; +} + + +float RingBuffer::Max(const SInt64 whichChannel) +{ + float max; + vDSP_maxv(mData[whichChannel],1,&max,mSizeOfBuffer); + return max; +} + + +float RingBuffer::Min(const SInt64 whichChannel) +{ + float min; + vDSP_minv(mData[whichChannel],1,&min,mSizeOfBuffer); + return min; +} + + +void RingBuffer::Clear() +{ + for (int i=0; i < mNumChannels; ++i) { + memset(mData[i], 0, sizeof(float)*mSizeOfBuffer); + mLastWrittenIndex[i] = 0; + mLastReadIndex[i] = 0; + } + +} + diff --git a/OscilloscopeView.xcodeproj/project.pbxproj b/OscilloscopeView.xcodeproj/project.pbxproj index de7d8d6..169f969 100644 --- a/OscilloscopeView.xcodeproj/project.pbxproj +++ b/OscilloscopeView.xcodeproj/project.pbxproj @@ -7,6 +7,10 @@ objects = { /* Begin PBXBuildFile section */ + 5CC6EADF165A31FA00AF1CEF /* AudioFileReader.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5CC6EAD8165A31FA00AF1CEF /* AudioFileReader.mm */; }; + 5CC6EAE0165A31FA00AF1CEF /* AudioFileWriter.m in Sources */ = {isa = PBXBuildFile; fileRef = 5CC6EADA165A31FA00AF1CEF /* AudioFileWriter.m */; }; + 5CC6EAE1165A31FA00AF1CEF /* Novocaine.m in Sources */ = {isa = PBXBuildFile; fileRef = 5CC6EADC165A31FA00AF1CEF /* Novocaine.m */; }; + 5CC6EAE2165A31FA00AF1CEF /* RingBuffer.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5CC6EADE165A31FA00AF1CEF /* RingBuffer.mm */; }; E205881015F235BC002FF41F /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E205880F15F235BC002FF41F /* UIKit.framework */; }; E205881215F235BC002FF41F /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E205881115F235BC002FF41F /* Foundation.framework */; }; E205881415F235BC002FF41F /* 
CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E205881315F235BC002FF41F /* CoreGraphics.framework */; }; @@ -20,14 +24,6 @@ E205883015F235BC002FF41F /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E205881115F235BC002FF41F /* Foundation.framework */; }; E205883815F235BC002FF41F /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = E205883615F235BC002FF41F /* InfoPlist.strings */; }; E205883B15F235BC002FF41F /* OscilloscopeViewTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = E205883A15F235BC002FF41F /* OscilloscopeViewTests.mm */; }; - E20588AA15F24C11002FF41F /* AudioFileReader.mm in Sources */ = {isa = PBXBuildFile; fileRef = E20588A315F24C11002FF41F /* AudioFileReader.mm */; }; - E20588AB15F24C11002FF41F /* AudioFileReader.mm in Sources */ = {isa = PBXBuildFile; fileRef = E20588A315F24C11002FF41F /* AudioFileReader.mm */; }; - E20588AC15F24C11002FF41F /* AudioFileWriter.m in Sources */ = {isa = PBXBuildFile; fileRef = E20588A515F24C11002FF41F /* AudioFileWriter.m */; }; - E20588AD15F24C11002FF41F /* AudioFileWriter.m in Sources */ = {isa = PBXBuildFile; fileRef = E20588A515F24C11002FF41F /* AudioFileWriter.m */; }; - E20588AE15F24C11002FF41F /* Novocaine.m in Sources */ = {isa = PBXBuildFile; fileRef = E20588A715F24C11002FF41F /* Novocaine.m */; }; - E20588AF15F24C11002FF41F /* Novocaine.m in Sources */ = {isa = PBXBuildFile; fileRef = E20588A715F24C11002FF41F /* Novocaine.m */; }; - E20588B015F24C11002FF41F /* RingBuffer.mm in Sources */ = {isa = PBXBuildFile; fileRef = E20588A915F24C11002FF41F /* RingBuffer.mm */; }; - E20588B115F24C11002FF41F /* RingBuffer.mm in Sources */ = {isa = PBXBuildFile; fileRef = E20588A915F24C11002FF41F /* RingBuffer.mm */; }; E20588B315F24C35002FF41F /* CoreAudio.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E20588B215F24C35002FF41F /* CoreAudio.framework */; }; E20588B515F24C3D002FF41F /* AudioToolbox.framework in Frameworks */ = {isa = 
PBXBuildFile; fileRef = E20588B415F24C3D002FF41F /* AudioToolbox.framework */; }; E20588B715F24C52002FF41F /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = E20588B615F24C52002FF41F /* Accelerate.framework */; }; @@ -53,6 +49,14 @@ /* End PBXContainerItemProxy section */ /* Begin PBXFileReference section */ + 5CC6EAD7165A31FA00AF1CEF /* AudioFileReader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioFileReader.h; sourceTree = ""; }; + 5CC6EAD8165A31FA00AF1CEF /* AudioFileReader.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AudioFileReader.mm; sourceTree = ""; }; + 5CC6EAD9165A31FA00AF1CEF /* AudioFileWriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioFileWriter.h; sourceTree = ""; }; + 5CC6EADA165A31FA00AF1CEF /* AudioFileWriter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AudioFileWriter.m; sourceTree = ""; }; + 5CC6EADB165A31FA00AF1CEF /* Novocaine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Novocaine.h; sourceTree = ""; }; + 5CC6EADC165A31FA00AF1CEF /* Novocaine.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Novocaine.m; sourceTree = ""; }; + 5CC6EADD165A31FA00AF1CEF /* RingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RingBuffer.h; sourceTree = ""; }; + 5CC6EADE165A31FA00AF1CEF /* RingBuffer.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = RingBuffer.mm; sourceTree = ""; }; E205880B15F235BC002FF41F /* OscilloscopeView.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = OscilloscopeView.app; sourceTree = BUILT_PRODUCTS_DIR; }; E205880F15F235BC002FF41F /* UIKit.framework */ = {isa = 
PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; }; E205881115F235BC002FF41F /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; @@ -72,14 +76,6 @@ E205883715F235BC002FF41F /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; E205883915F235BC002FF41F /* OscilloscopeViewTests.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = OscilloscopeViewTests.h; sourceTree = ""; }; E205883A15F235BC002FF41F /* OscilloscopeViewTests.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = OscilloscopeViewTests.mm; sourceTree = ""; }; - E20588A215F24C11002FF41F /* AudioFileReader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioFileReader.h; sourceTree = ""; }; - E20588A315F24C11002FF41F /* AudioFileReader.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = AudioFileReader.mm; sourceTree = ""; }; - E20588A415F24C11002FF41F /* AudioFileWriter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioFileWriter.h; sourceTree = ""; }; - E20588A515F24C11002FF41F /* AudioFileWriter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AudioFileWriter.m; sourceTree = ""; }; - E20588A615F24C11002FF41F /* Novocaine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Novocaine.h; sourceTree = ""; }; - E20588A715F24C11002FF41F /* Novocaine.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Novocaine.m; sourceTree = ""; }; - 
E20588A815F24C11002FF41F /* RingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RingBuffer.h; sourceTree = ""; }; - E20588A915F24C11002FF41F /* RingBuffer.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = RingBuffer.mm; sourceTree = ""; }; E20588B215F24C35002FF41F /* CoreAudio.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreAudio.framework; path = System/Library/Frameworks/CoreAudio.framework; sourceTree = SDKROOT; }; E20588B415F24C3D002FF41F /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; }; E20588B615F24C52002FF41F /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; }; @@ -122,11 +118,26 @@ /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ + 5CC6EAD6165A31FA00AF1CEF /* Novocaine */ = { + isa = PBXGroup; + children = ( + 5CC6EAD7165A31FA00AF1CEF /* AudioFileReader.h */, + 5CC6EAD8165A31FA00AF1CEF /* AudioFileReader.mm */, + 5CC6EAD9165A31FA00AF1CEF /* AudioFileWriter.h */, + 5CC6EADA165A31FA00AF1CEF /* AudioFileWriter.m */, + 5CC6EADB165A31FA00AF1CEF /* Novocaine.h */, + 5CC6EADC165A31FA00AF1CEF /* Novocaine.m */, + 5CC6EADD165A31FA00AF1CEF /* RingBuffer.h */, + 5CC6EADE165A31FA00AF1CEF /* RingBuffer.mm */, + ); + path = Novocaine; + sourceTree = ""; + }; E205880015F235BB002FF41F = { isa = PBXGroup; children = ( E2BD40431601F97900234916 /* .gitignore */, - E20588A115F24C11002FF41F /* Novocaine */, + 5CC6EAD6165A31FA00AF1CEF /* Novocaine */, E205881515F235BC002FF41F /* OscilloscopeView */, E205883315F235BC002FF41F /* OscilloscopeViewTests */, E205880E15F235BC002FF41F /* Frameworks */, @@ -203,22 +214,6 
@@ name = "Supporting Files"; sourceTree = ""; }; - E20588A115F24C11002FF41F /* Novocaine */ = { - isa = PBXGroup; - children = ( - E20588A215F24C11002FF41F /* AudioFileReader.h */, - E20588A315F24C11002FF41F /* AudioFileReader.mm */, - E20588A415F24C11002FF41F /* AudioFileWriter.h */, - E20588A515F24C11002FF41F /* AudioFileWriter.m */, - E20588A615F24C11002FF41F /* Novocaine.h */, - E20588A715F24C11002FF41F /* Novocaine.m */, - E20588A815F24C11002FF41F /* RingBuffer.h */, - E20588A915F24C11002FF41F /* RingBuffer.mm */, - ); - name = Novocaine; - path = ../Novocaine; - sourceTree = ""; - }; /* End PBXGroup section */ /* Begin PBXNativeTarget section */ @@ -329,11 +324,11 @@ E205881C15F235BC002FF41F /* main.m in Sources */, E205882015F235BC002FF41F /* AppDelegate.mm in Sources */, E205882315F235BC002FF41F /* ViewController.mm in Sources */, - E20588AA15F24C11002FF41F /* AudioFileReader.mm in Sources */, - E20588AC15F24C11002FF41F /* AudioFileWriter.m in Sources */, - E20588AE15F24C11002FF41F /* Novocaine.m in Sources */, - E20588B015F24C11002FF41F /* RingBuffer.mm in Sources */, E2BD405016021A1900234916 /* OscilloscopeView.mm in Sources */, + 5CC6EADF165A31FA00AF1CEF /* AudioFileReader.mm in Sources */, + 5CC6EAE0165A31FA00AF1CEF /* AudioFileWriter.m in Sources */, + 5CC6EAE1165A31FA00AF1CEF /* Novocaine.m in Sources */, + 5CC6EAE2165A31FA00AF1CEF /* RingBuffer.mm in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -342,10 +337,6 @@ buildActionMask = 2147483647; files = ( E205883B15F235BC002FF41F /* OscilloscopeViewTests.mm in Sources */, - E20588AB15F24C11002FF41F /* AudioFileReader.mm in Sources */, - E20588AD15F24C11002FF41F /* AudioFileWriter.m in Sources */, - E20588AF15F24C11002FF41F /* Novocaine.m in Sources */, - E20588B115F24C11002FF41F /* RingBuffer.mm in Sources */, E2BD405116021A1900234916 /* OscilloscopeView.mm in Sources */, ); runOnlyForDeploymentPostprocessing = 0; diff --git a/OscilloscopeView/OscilloscopeView.mm 
b/OscilloscopeView/OscilloscopeView.mm index f2e7c89..ae3ae54 100644 --- a/OscilloscopeView/OscilloscopeView.mm +++ b/OscilloscopeView/OscilloscopeView.mm @@ -43,7 +43,7 @@ - (id)initWithFrame:(CGRect)frame if (self) { self.color = [UIColor greenColor]; self.thickness = 2.0; - self.pixelStride = 1.0; + self.pixelStride = [[UIScreen mainScreen] scale]; self.audioManager = [Novocaine audioManager]; self.audioManager.numInputChannels = 1; @@ -90,25 +90,20 @@ - (void)refreshWithData:(float *)data numFrames:(UInt32)numFrames numChannels:(U - (void)drawRect:(CGRect)rect { - float w = self.bounds.size.width; - float h = self.bounds.size.height; + float w = rect.size.width; + float h = rect.size.height; float h2 = h/2; float hScale = h2 / 1.25; - float wScale = _numFrames / w; CGContextRef ctx = UIGraphicsGetCurrentContext(); - CGContextClearRect(ctx, self.bounds); + CGContextClearRect(ctx, rect); if (_numFrames > 0) { - - NSLog(@"width %f, frames %lu, stride %f", w, _numFrames, _pixelStride); - // make the path CGContextBeginPath(ctx); CGContextMoveToPoint(ctx, 0, h2 + _data[0] * hScale); - for (float t = 1; t < w; t += _pixelStride) { - NSLog(@"point %f, %f", t, h2 + _data[(int)(t * wScale)*_numChannels] * hScale); - CGContextAddLineToPoint(ctx, t, h2 + _data[(int)(t * wScale)*_numChannels] * hScale); + for (int t = 1; t < w; t += _pixelStride) { + CGContextAddLineToPoint(ctx, t, h2 + _data[(int)t*_numChannels] * hScale); } // stroke it diff --git a/OscilloscopeView/en.lproj/ViewController.xib b/OscilloscopeView/en.lproj/ViewController.xib index e7178e4..f63d719 100644 --- a/OscilloscopeView/en.lproj/ViewController.xib +++ b/OscilloscopeView/en.lproj/ViewController.xib @@ -1,14 +1,14 @@ - 1280 - 11C25 - 1919 - 1138.11 - 566.00 + 1536 + 12C60 + 2844 + 1187.34 + 625.00 com.apple.InterfaceBuilder.IBCocoaTouchPlugin - 916 + 1930 IBProxyObject @@ -33,7 +33,7 @@ 274 - {{0, 20}, {320, 460}} + {320, 460} @@ -44,7 +44,6 @@ NO - IBCocoaTouchFramework @@ -114,6 +113,6 @@ 
IBCocoaTouchFramework YES 3 - 916 + 1930