How to generate an audio waveform programmatically while recording voice in iOS?
You should check out EZAudio (https://github.com/syedhali/EZAudio), specifically the EZRecorder and the EZAudioPlot (or the GPU-accelerated EZAudioPlotGL).
There is also an example project that does exactly what you want: https://github.com/syedhali/EZAudio/tree/master/EZAudioExamples/iOS/EZAudioRecordExample
EDIT: Here's the code inline
/// In your interface

/**
 Use an OpenGL-based plot to visualize the data coming in
 */
@property (nonatomic, weak) IBOutlet EZAudioPlotGL *audioPlot;

/**
 The microphone component
 */
@property (nonatomic, strong) EZMicrophone *microphone;

/**
 The recorder component
 */
@property (nonatomic, strong) EZRecorder *recorder;
...
/// In your implementation

// Create an instance of the microphone and tell it to use this view controller instance as the delegate
-(void)viewDidLoad {
    [super viewDidLoad];
    self.microphone = [EZMicrophone microphoneWithDelegate:self startsImmediately:YES];
}
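
// The example project also styles the waveform plot in viewDidLoad. A minimal sketch of that setup
// using EZAudioPlot/EZAudioPlotGL's plotType, shouldFill, shouldMirror and gain properties
// (call it from viewDidLoad; the gain value here is just an assumption, tweak to taste):
-(void)configureAudioPlot {
    self.audioPlot.plotType     = EZPlotTypeRolling; // scrolling waveform instead of a static buffer
    self.audioPlot.shouldFill   = YES;               // draw a filled waveform
    self.audioPlot.shouldMirror = YES;               // mirror it around the horizontal axis
    self.audioPlot.gain         = 2.0f;              // visually boost low-amplitude input
}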
// EZMicrophoneDelegate will provide these callbacks
-(void)microphone:(EZMicrophone *)microphone
  hasAudioReceived:(float **)buffer
    withBufferSize:(UInt32)bufferSize
withNumberOfChannels:(UInt32)numberOfChannels {
    // This callback arrives on the audio thread, so hop to the main queue before touching the UI
    dispatch_async(dispatch_get_main_queue(), ^{
        // Updates the audio plot with the waveform data
        [self.audioPlot updateBuffer:buffer[0] withBufferSize:bufferSize];
    });
}
-(void)microphone:(EZMicrophone *)microphone hasAudioStreamBasicDescription:(AudioStreamBasicDescription)audioStreamBasicDescription {
    // The AudioStreamBasicDescription of the microphone stream. This is useful when configuring
    // the EZRecorder or telling another component what audio format type to expect.
    // We can initialize the recorder with this ASBD
    self.recorder = [EZRecorder recorderWithDestinationURL:[self testFilePathURL]
                                           andSourceFormat:audioStreamBasicDescription];
}
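
// -testFilePathURL isn't shown above; in the example project it just builds a file URL inside the
// app's Documents directory. A minimal sketch (the "test.m4a" file name is an assumption):
-(NSURL *)testFilePathURL {
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *filePath = [[paths firstObject] stringByAppendingPathComponent:@"test.m4a"];
    return [NSURL fileURLWithPath:filePath];
}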
-(void)microphone:(EZMicrophone *)microphone
     hasBufferList:(AudioBufferList *)bufferList
    withBufferSize:(UInt32)bufferSize
withNumberOfChannels:(UInt32)numberOfChannels {
    // Getting the audio data as a buffer list that can be fed directly into the EZRecorder.
    // This is happening on the audio thread - any UI updating needs a GCD main queue block.
    // This will keep appending data to the tail of the audio file.
    if (self.isRecording) {
        [self.recorder appendDataFromBufferList:bufferList
                                 withBufferSize:bufferSize];
    }
}
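Note that self.isRecording isn't declared in the interface snippet above; in the example project it's just a BOOL property flipped by a record button. A minimal sketch (the property and action names here are assumptions), which also closes the file via EZRecorder's closeAudioFile when you stop:

/// In your interface
@property (nonatomic, assign) BOOL isRecording;
...
/// In your implementation
// Wire this up to a UIButton to start/stop appending microphone data to the file
-(IBAction)toggleRecording:(id)sender {
    self.isRecording = !self.isRecording;
    if (!self.isRecording) {
        // Done recording - close the audio file so it can be read back later
        [self.recorder closeAudioFile];
    }
}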