Problem Description
This is my code. I use it to record the iPhone's output audio with an Audio Unit and then save the output to output.caf, but the output.caf file is empty. Does anybody have an idea what I should do?
This is the initialization of the audio unit:
-(void) initializaeOutputUnit
{
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
// Describe format
AudioStreamBasicDescription audioFormat={0};
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
AudioUnitInitialize(audioUnit);
AudioOutputUnitStart(audioUnit);
// Initialize the audio file
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory] autorelease];
NSLog(@">>> %@", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, @"Couldn't create file for writing");
setupErr = ExtAudioFileSetProperty(effectState.audioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
NSAssert(setupErr == noErr, @"Couldn't create file for format");
setupErr = ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");
}
The recording callback:
static OSStatus recordingCallback (void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData) {
NSLog(@"callback");
if (*ioActionFlags == kAudioUnitRenderAction_PostRender && inBusNumber == 0)
{
AudioBufferList *bufferList; // <- Fill this up with buffers (you will want to malloc it, as it's a dynamic-length list)
EffectState *effectState = (EffectState *)inRefCon;
AudioUnit rioUnit =[(MixerHostAudio*)inRefCon getAudioUnit];
OSStatus status;
NSLog(@"de5eal el call back ");
// BELOW I GET THE ERROR
status = AudioUnitRender( rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
bufferList);
if (noErr != status) { NSLog(@"AudioUnitRender error"); return noErr;}
// Now, we have the samples we just read sitting in buffers in bufferList
ExtAudioFileWriteAsync(effectState->audioFileRef, inNumberFrames, bufferList);
}
return noErr;
}
// Then stop recording
- (void) stopRecord
{
AudioOutputUnitStop(audioUnit);
AudioUnitUninitialize(audioUnit);
}
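Recommended answer: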
In initializaeOutputUnit you only created your audio file:
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);
Passing 0 (frames) and NULL (audio buffer) just initializes the internal buffers, so that the first real write does not have to allocate them on the render thread:
setupErr = ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
That's what's going wrong in recordingCallback:
1) ioActionFlags is always 0 and inBusNumber is always 1, because that's how you set up your callback (kInputBus = 1):
if (*ioActionFlags == kAudioUnitRenderAction_PostRender && inBusNumber == 0)
So the condition can never be true; just remove the if statement.
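If you still want a guard there, a minimal sketch (using the question's kInputBus constant, assumed to be 1) would check the bus you actually registered instead:
// Optional guard (sketch): only record from the input bus the callback was registered on.
if (inBusNumber != kInputBus) {
    return noErr; // nothing to capture on other buses
}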
2) From AudioUnitRender you will receive a -50 error, which is defined in CoreAudioTypes.h as kAudio_ParamError. This happens because bufferList is undefined and NULL!
OSStatus status;
status = AudioUnitRender(THIS->mAudioUnit,
                         ioActionFlags,
                         inTimeStamp,
                         kInputBus,
                         inNumberFrames,
                         &bufferList);
if (noErr != status) {
    printf("AudioUnitRender error: %ld", status);
    return noErr;
}
You just need to define a valid AudioBuffer and pass it to AudioUnitRender. This is my working render callback:
static OSStatus recordingCallback (void *                       inRefCon,
                                   AudioUnitRenderActionFlags * ioActionFlags,
                                   const AudioTimeStamp *       inTimeStamp,
                                   UInt32                       inBusNumber,
                                   UInt32                       inNumberFrames,
                                   AudioBufferList *            ioData) {
    double timeInSeconds = inTimeStamp->mSampleTime / kSampleRate;
    printf("%fs inBusNumber: %lu inNumberFrames: %lu\n", timeInSeconds, inBusNumber, inNumberFrames);
    //printAudioUnitRenderActionFlags(ioActionFlags);

    // Stack-allocated buffer list with one mono, 16-bit buffer.
    AudioBufferList bufferList;
    SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
    memset(&samples, 0, sizeof(samples));

    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = samples;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);

    ViewController* THIS = (__bridge ViewController *)inRefCon;

    OSStatus status;
    status = AudioUnitRender(THIS->mAudioUnit,
                             ioActionFlags,
                             inTimeStamp,
                             kInputBus,
                             inNumberFrames,
                             &bufferList);
    if (noErr != status) {
        printf("AudioUnitRender error: %ld", status);
        return noErr;
    }

    // Now, we have the samples we just read sitting in buffers in bufferList
    ExtAudioFileWriteAsync(THIS->mAudioFileRef, inNumberFrames, &bufferList);

    return noErr;
}
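One caveat (my note, not from the original answer): the bufferList layout must stay in sync with the client data format you set via kExtAudioFileProperty_ClientDataFormat. If you switched to interleaved 16-bit stereo, for example, roughly the following would change, assuming the same ASBD and callback as above:
// Hypothetical stereo variant -- keep the ASBD and the render buffer in sync:
// audioFormat.mChannelsPerFrame = 2;
// audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket = 4;
SInt16 samples[inNumberFrames * 2];               // two interleaved channels
bufferList.mBuffers[0].mNumberChannels = 2;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames * 2 * sizeof(SInt16);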
In stopRecord you should close the audio file with ExtAudioFileDispose:
- (void)stopRecording:(NSTimer*)theTimer {
    printf("\nstopRecording\n");
    AudioOutputUnitStop(mAudioUnit);
    AudioUnitUninitialize(mAudioUnit);

    OSStatus status = ExtAudioFileDispose(mAudioFileRef);
    printf("OSStatus(ExtAudioFileDispose): %ld\n", status);
}
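If you are completely done with the unit, you can also release the RemoteIO instance itself after uninitializing it (my addition, not part of the original answer):
// Optional teardown (sketch): dispose of the audio unit instance as well.
AudioComponentInstanceDispose(mAudioUnit);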
Full source code: http://pastebin.com/92Fyjaye