How to decode AAC audio buffer to PCM buffer in iOS? [closed]

最后都变了- 提交于 2019-11-27 14:33:31

I have sample code to do it.

First, you should configure the input/output ASBDs (AudioStreamBasicDescription) and create the converter:

/// Creates the AAC -> LPCM converter and stores it in the _audioConverter ivar.
/// Output: 44.1 kHz, mono, 16-bit signed integer linear PCM.
/// Input: 44.1 kHz, mono AAC-LC with 1024 frames per packet (byte sizes are 0
/// because AAC packets are variable-size).
- (void)setupAudioConverter{
    // Destination format: interleaved 16-bit signed mono PCM at 44.1 kHz.
    AudioStreamBasicDescription outputFormat = {0};
    outputFormat.mFormatID         = kAudioFormatLinearPCM;
    outputFormat.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger;
    outputFormat.mSampleRate       = 44100;
    outputFormat.mChannelsPerFrame = 1;
    outputFormat.mBitsPerChannel   = 16;
    // One mono 16-bit sample per frame, one frame per packet.
    outputFormat.mBytesPerFrame    = 2;
    outputFormat.mBytesPerPacket   = 2;
    outputFormat.mFramesPerPacket  = 1;
    outputFormat.mReserved         = 0;

    // Source format: AAC-LC. Compressed packets have variable size, so the
    // per-byte fields stay 0; an AAC packet always decodes to 1024 frames.
    AudioStreamBasicDescription inputFormat = {0};
    inputFormat.mFormatID          = kAudioFormatMPEG4AAC;
    inputFormat.mFormatFlags       = kMPEG4Object_AAC_LC;
    inputFormat.mSampleRate        = 44100;
    inputFormat.mChannelsPerFrame  = 1;
    inputFormat.mFramesPerPacket   = 1024;
    inputFormat.mBytesPerPacket    = 0;
    inputFormat.mBytesPerFrame     = 0;
    inputFormat.mBitsPerChannel    = 0;
    inputFormat.mReserved          = 0;

    OSStatus status = AudioConverterNew(&inputFormat, &outputFormat, &_audioConverter);
    if (status != 0) {
        printf("setup converter error, status: %i\n", (int)status);
    }
}

After that, you should implement the input-data callback function for the audio converter:

// Per-call state handed to the converter's input callback: one compressed
// AAC packet and its packet description.
struct PassthroughUserData {
    UInt32 mChannels;   // channel count of the compressed data
    UInt32 mDataSize;   // bytes remaining to feed (0 once consumed)
    const void* mData;  // pointer to the AAC packet payload (not owned)
    AudioStreamPacketDescription mPacket;  // description of that single packet
};


/// AudioConverterComplexInputDataProc: feeds exactly one AAC packet to the
/// converter, then reports end-of-data on the next invocation.
/// @param aNumDataPackets In: packets requested. Out: packets actually supplied.
/// @param aData           Buffer list to point at the compressed bytes.
/// @param aPacketDesc     Optional out packet descriptions (required for VBR
///                        formats such as AAC).
/// @param aUserData       PassthroughUserData set up by the caller.
/// @return noErr when data was supplied; kNoMoreDataErr (a project-defined
///         sentinel status) when the single packet has been consumed.
OSStatus inInputDataProc(AudioConverterRef aAudioConverter,
                         UInt32* aNumDataPackets /* in/out */,
                         AudioBufferList* aData /* in/out */,
                         AudioStreamPacketDescription** aPacketDesc,
                         void* aUserData)
{

    PassthroughUserData* userData = (PassthroughUserData*)aUserData;
    if (!userData->mDataSize) {
        // All data already handed out on a previous call — tell the converter
        // nothing more is coming.
        *aNumDataPackets = 0;
        return kNoMoreDataErr;
    }

    // Fix: report that exactly ONE packet is being supplied. The original
    // code left *aNumDataPackets at the converter's requested count, which
    // mis-states how much compressed data the buffer actually contains.
    *aNumDataPackets = 1;

    if (aPacketDesc) {
        userData->mPacket.mStartOffset = 0;
        userData->mPacket.mVariableFramesInPacket = 0;
        userData->mPacket.mDataByteSize = userData->mDataSize;
        *aPacketDesc = &userData->mPacket;
    }

    aData->mBuffers[0].mNumberChannels = userData->mChannels;
    aData->mBuffers[0].mDataByteSize = userData->mDataSize;
    aData->mBuffers[0].mData = const_cast<void*>(userData->mData);

    // No more data to provide following this run.
    userData->mDataSize = 0;

    return noErr;
}

And the method for decoding a frame:

/// Decodes one compressed AAC frame to 16-bit mono PCM, draining the converter
/// in chunks of up to MAX_AUDIO_FRAMES output frames per pass.
/// @param frame The raw AAC packet payload. Empty input is a no-op.
/// @param pts   Presentation timestamp, forwarded to the renderer (currently
///              commented out).
- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts{
    if(!_audioConverter){
        [self setupAudioConverter];
    }
    if (frame.length == 0) {
        // Nothing to decode; avoids feeding the converter a zero-size packet.
        return;
    }

    // One mono packet; mPacket is filled in by the input callback.
    PassthroughUserData userData = { 1, (UInt32)frame.length, [frame bytes]};
    NSMutableData *decodedData = [NSMutableData new];

    const uint32_t MAX_AUDIO_FRAMES = 128;
    const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * 1;  // * channel count (mono)

    do{
        // Scratch buffer for one conversion pass (16-bit samples).
        uint8_t *buffer = (uint8_t *)malloc(maxDecodedSamples * sizeof(short int));
        AudioBufferList decBuffer;
        decBuffer.mNumberBuffers = 1;
        decBuffer.mBuffers[0].mNumberChannels = 1;
        decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(short int);
        decBuffer.mBuffers[0].mData = buffer;

        UInt32 numFrames = MAX_AUDIO_FRAMES;

        AudioStreamPacketDescription outPacketDescription;
        memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
        outPacketDescription.mDataByteSize = MAX_AUDIO_FRAMES;
        outPacketDescription.mStartOffset = 0;
        outPacketDescription.mVariableFramesInPacket = 0;

        OSStatus rv = AudioConverterFillComplexBuffer(_audioConverter,
                                                      inInputDataProc,
                                                      &userData,
                                                      &numFrames /* in/out */,
                                                      &decBuffer,
                                                      &outPacketDescription);

        if (rv && rv != kNoMoreDataErr) {
            // OSStatus is a fixed-width SInt32; cast so %d is well-defined.
            NSLog(@"Error decoding audio stream: %d\n", (int)rv);
            free(buffer);  // fix: buffer was leaked on this break path
            break;
        }

        if (numFrames) {
            // The converter updates mDataByteSize to the bytes actually written.
            [decodedData appendBytes:decBuffer.mBuffers[0].mData length:decBuffer.mBuffers[0].mDataByteSize];
        }

        // Fix: the original never freed the per-iteration malloc, leaking
        // maxDecodedSamples * 2 bytes on every pass through the loop.
        free(buffer);

        if (rv == kNoMoreDataErr) {
            break;
        }

    }while (true);

    //void *pData = (void *)[decodedData bytes];
    //audioRenderer->Render(&pData, decodedData.length, pts);
}

You need to use Core Audio. Look for Core Audio Overview in the Apple documentation.

标签
易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!