Setting up effect audio units for CoreAudio - objective-c

I'm trying to set up a high-pass filter, but AUGraphStart gives me -10863 (kAUGraphErr_CannotDoInCurrentContext) when I try. I cannot find much documentation at all. Here is my attempt to set up the filter:
- (void)initializeAUGraph{
AUNode outputNode;
AUNode mixerNode;
AUNode effectNode;
NewAUGraph(&mGraph);
// Create AudioComponentDescriptions for the AUs we want in the graph
// mixer component
AudioComponentDescription mixer_desc;
mixer_desc.componentType = kAudioUnitType_Mixer;
mixer_desc.componentSubType = kAudioUnitSubType_AU3DMixerEmbedded;
mixer_desc.componentFlags = 0;
mixer_desc.componentFlagsMask = 0;
mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// output component
AudioComponentDescription output_desc;
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = kAudioUnitSubType_RemoteIO;
output_desc.componentFlags = 0;
output_desc.componentFlagsMask = 0;
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
//effect component
AudioComponentDescription effect_desc;
effect_desc.componentType = kAudioUnitType_Effect;
effect_desc.componentSubType = kAudioUnitSubType_HighPassFilter;
effect_desc.componentFlags = 0;
effect_desc.componentFlagsMask = 0;
effect_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Add nodes to the graph to hold our AudioUnits
AUGraphAddNode(mGraph, &output_desc, &outputNode);
AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
AUGraphAddNode(mGraph, &effect_desc, &effectNode);
// Connect the nodes
AUGraphConnectNodeInput(mGraph, mixerNode, 0, effectNode, 0);
AUGraphConnectNodeInput(mGraph, effectNode, 0, outputNode, 0);
//Open Graph
AUGraphOpen(mGraph);
// Get a link to the mixer AU
AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
// Get a link to the effect AU
AUGraphNodeInfo(mGraph, effectNode, NULL, &mEffect);
//Setup buses
size_t numbuses = track_count;
UInt32 size = sizeof(numbuses);
AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, size);
//Setup Stream Format Data
AudioStreamBasicDescription desc;
size = sizeof(desc);
// Setup Stream Format
desc.mSampleRate = kGraphSampleRate;
desc.mFormatID = kAudioFormatLinearPCM;
desc.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
desc.mBitsPerChannel = sizeof(AudioSampleType) * 8; // AudioSampleType == 16 bit signed ints
desc.mChannelsPerFrame = 1;
desc.mFramesPerPacket = 1;
desc.mBytesPerFrame = sizeof(AudioSampleType);
desc.mBytesPerPacket = desc.mBytesPerFrame;
// Loop through and setup a callback for each source you want to send to the mixer.
for (int i = 0; i < numbuses; ++i) {
// Setup render callback struct
AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = &renderInput;
renderCallbackStruct.inputProcRefCon = self;
// Connect the callback to the mixer input channel
AUGraphSetNodeInputCallback(mGraph, mixerNode, i, &renderCallbackStruct);
// Apply Stream Data
AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,i,&desc,size);
AudioUnitSetParameter(mMixer, k3DMixerParam_Distance, kAudioUnitScope_Input, i, rand() % 6, 0);
rotation[i] = rand() % 360;
rotation_speed[i] = rand() % 5;
AudioUnitSetParameter(mMixer, k3DMixerParam_Azimuth, kAudioUnitScope_Input, i, rotation[i], 0);
AudioUnitSetParameter(mMixer, k3DMixerParam_Elevation, kAudioUnitScope_Input, i, 30, 0);
}
// Reset stream format data to 0
memset (&desc, 0, sizeof (desc));
// Setup output stream format
desc.mSampleRate = kGraphSampleRate;
// Apply Stream Data to Output
AudioUnitSetProperty(mEffect,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,0,&desc,size);
AudioUnitSetProperty(mEffect,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,&desc,size);
AudioUnitSetProperty(mMixer,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,0,&desc,size);
//All done so initialise
AUGraphInitialize(mGraph);
}
It works when I remove the high-pass filter. How do I get the filter working?
Thank you.
PS: Is the 3D elevation supposed to do nothing?

If you still have the problem: add a converter unit between the mixer node and the effect node, set the converter's input format to the mixer's output format, and set its output format to the format you get from AudioUnitGetProperty on the converter node.
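For concreteness, here is a minimal sketch of that suggestion (untested, reusing the question's mGraph, mixerNode, effectNode and mMixer variables; converterNode and the format variables are new names introduced for illustration):
AUNode converterNode;
AudioComponentDescription converter_desc;
converter_desc.componentType = kAudioUnitType_FormatConverter;
converter_desc.componentSubType = kAudioUnitSubType_AUConverter;
converter_desc.componentFlags = 0;
converter_desc.componentFlagsMask = 0;
converter_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
AUGraphAddNode(mGraph, &converter_desc, &converterNode);
// Reroute mixer -> converter -> effect (replacing the direct mixer -> effect connection)
AUGraphConnectNodeInput(mGraph, mixerNode, 0, converterNode, 0);
AUGraphConnectNodeInput(mGraph, converterNode, 0, effectNode, 0);
// After AUGraphOpen: the converter's input side gets the mixer's output format,
// and its output side gets whatever format the effect unit reports for its input.
AudioUnit converterUnit, effectUnit;
AUGraphNodeInfo(mGraph, converterNode, NULL, &converterUnit);
AUGraphNodeInfo(mGraph, effectNode, NULL, &effectUnit);
AudioStreamBasicDescription mixerOutFmt, effectInFmt;
UInt32 sz = sizeof(AudioStreamBasicDescription);
AudioUnitGetProperty(mMixer, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &mixerOutFmt, &sz);
sz = sizeof(AudioStreamBasicDescription);
AudioUnitGetProperty(effectUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &effectInFmt, &sz);
AudioUnitSetProperty(converterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &mixerOutFmt, sizeof(mixerOutFmt));
AudioUnitSetProperty(converterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &effectInFmt, sizeof(effectInFmt));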

Not all Audio Units that are available on OS X are available on iOS. In fact, only a few are. According to the documentation below, the high-pass filter effect is not supported on iOS: http://developer.apple.com/library/ios/#documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/UsingSpecificAudioUnits/UsingSpecificAudioUnits.html#//apple_ref/doc/uid/TP40009492-CH17-SW1
"The iPod EQ unit (subtype kAudioUnitSubType_AUiPodEQ) is the only effect unit provided in iOS 4."
Note that it mentions iOS 4; I have been unable to find any documentation on this for later versions of iOS.
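If some filtering is better than none, the quote above suggests the one substitution available on iOS 4: swap the effect description to the iPod EQ unit and keep the rest of the question's graph setup unchanged. A sketch:
effect_desc.componentType = kAudioUnitType_Effect;
effect_desc.componentSubType = kAudioUnitSubType_AUiPodEQ; // the only effect unit provided in iOS 4
effect_desc.componentFlags = 0;
effect_desc.componentFlagsMask = 0;
effect_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// EQ curves are then chosen from the unit's factory presets, via
// kAudioUnitProperty_FactoryPresets and kAudioUnitProperty_PresentPreset.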

Related

Why does copying data to a linear tiled image (not using a staging buffer) not work correctly when the image format is VK_FORMAT_R8G8B8_UNORM?

There are two ways to copy data to an image (with or without a staging buffer). The first way, using a staging buffer, works correctly whether the image format is VK_FORMAT_R8G8B8A8_UNORM or VK_FORMAT_R8G8B8_UNORM. Without a staging buffer, VK_FORMAT_R8G8B8A8_UNORM also works well, but after changing the format to VK_FORMAT_R8G8B8_UNORM the sampled result is incorrect. The source data has been verified to be correct for both formats.
The code used is from https://github.com/SaschaWillems/Vulkan/blob/master/examples/texture/texture.cpp:
if (0/*useStaging*/) {
// Copy data to an optimal tiled image
// This loads the texture data into a host local buffer that is copied to the optimal tiled image on the device
// Create a host-visible staging buffer that contains the raw image data
// This buffer will be the data source for copying texture data to the optimal tiled image on the device
VkBuffer stagingBuffer;
VkDeviceMemory stagingMemory;
VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo();
bufferCreateInfo.size = ktxTextureSize;
// This buffer is used as a transfer source for the buffer copy
bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer));
// Get memory requirements for the staging buffer (alignment, memory type bits)
vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
// Get memory type index for a host visible buffer
memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory));
VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0));
// Copy texture data into host local staging buffer
uint8_t *data;
VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data));
memcpy(data, ktxTextureData, ktxTextureSize);
vkUnmapMemory(device, stagingMemory);
// Setup buffer copy regions for each mip level
std::vector<VkBufferImageCopy> bufferCopyRegions;
for (uint32_t i = 0; i < texture.mipLevels; i++) {
// Calculate offset into staging buffer for the current mip level
ktx_size_t offset;
KTX_error_code ret = ktxTexture_GetImageOffset(ktxTexture, i, 0, 0, &offset);
assert(ret == KTX_SUCCESS);
// Setup a buffer image copy structure for the current mip level
VkBufferImageCopy bufferCopyRegion = {};
bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
bufferCopyRegion.imageSubresource.mipLevel = i;
bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
bufferCopyRegion.imageSubresource.layerCount = 1;
bufferCopyRegion.imageExtent.width = ktxTexture->baseWidth >> i;
bufferCopyRegion.imageExtent.height = ktxTexture->baseHeight >> i;
bufferCopyRegion.imageExtent.depth = 1;
bufferCopyRegion.bufferOffset = offset;
bufferCopyRegions.push_back(bufferCopyRegion);
}
// Create optimal tiled target image on the device
VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = format;
imageCreateInfo.mipLevels = texture.mipLevels;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
// Set initial layout of the image to undefined
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.extent = { texture.width, texture.height, 1 };
imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &texture.image));
vkGetImageMemoryRequirements(device, texture.image, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &texture.deviceMemory));
VK_CHECK_RESULT(vkBindImageMemory(device, texture.image, texture.deviceMemory, 0));
VkCommandBuffer copyCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
// Image memory barriers for the texture image
// The sub resource range describes the regions of the image that will be transitioned using the memory barriers below
VkImageSubresourceRange subresourceRange = {};
// Image only contains color data
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Start at first mip level
subresourceRange.baseMipLevel = 0;
// We will transition on all mip levels
subresourceRange.levelCount = texture.mipLevels;
// The 2D texture only has one layer
subresourceRange.layerCount = 1;
// Transition the texture image layout to transfer target, so we can safely copy our buffer data to it.
VkImageMemoryBarrier imageMemoryBarrier = vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.image = texture.image;
imageMemoryBarrier.subresourceRange = subresourceRange;
imageMemoryBarrier.srcAccessMask = 0;
imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
// Insert a memory dependency at the proper pipeline stages that will execute the image layout transition
// Source pipeline stage is host write/read execution (VK_PIPELINE_STAGE_HOST_BIT)
// Destination pipeline stage is copy command execution (VK_PIPELINE_STAGE_TRANSFER_BIT)
vkCmdPipelineBarrier(
copyCmd,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
// Copy mip levels from staging buffer
vkCmdCopyBufferToImage(
copyCmd,
stagingBuffer,
texture.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
static_cast<uint32_t>(bufferCopyRegions.size()),
bufferCopyRegions.data());
// Once the data has been uploaded, we transition the texture image to the shader read layout so it can be sampled from
imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Insert a memory dependency at the proper pipeline stages that will execute the image layout transition
// Source pipeline stage is copy command execution (VK_PIPELINE_STAGE_TRANSFER_BIT)
// Destination pipeline stage fragment shader access (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
vkCmdPipelineBarrier(
copyCmd,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
// Store current layout for later reuse
texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
vulkanDevice->flushCommandBuffer(copyCmd, queue, true);
// Clean up staging resources
vkFreeMemory(device, stagingMemory, nullptr);
vkDestroyBuffer(device, stagingBuffer, nullptr);
} else {
// Copy data to a linear tiled image
VkImage mappableImage;
VkDeviceMemory mappableMemory;
// Load mip map level 0 to linear tiling image
VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = format;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
imageCreateInfo.extent = { texture.width, texture.height, 1 };
VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &mappableImage));
// Get memory requirements for this image like size and alignment
vkGetImageMemoryRequirements(device, mappableImage, &memReqs);
// Set memory allocation size to required memory size
memAllocInfo.allocationSize = memReqs.size;
// Get memory type that can be mapped to host memory
memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &mappableMemory));
VK_CHECK_RESULT(vkBindImageMemory(device, mappableImage, mappableMemory, 0));
// Map image memory
void *data;
VK_CHECK_RESULT(vkMapMemory(device, mappableMemory, 0, memReqs.size, 0, &data));
// Copy image data of the first mip level into memory
memcpy(data, ktxTextureData, memReqs.size);
vkUnmapMemory(device, mappableMemory);
// Linear tiled images don't need to be staged and can be directly used as textures
texture.image = mappableImage;
texture.deviceMemory = mappableMemory;
texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Setup image memory barrier transfer image to shader read layout
VkCommandBuffer copyCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
// The subresource range describes the regions of the image we will transition
VkImageSubresourceRange subresourceRange = {};
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseMipLevel = 0;
subresourceRange.levelCount = 1;
subresourceRange.layerCount = 1;
// Transition the texture image layout to shader read, so it can be sampled from
VkImageMemoryBarrier imageMemoryBarrier = vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.image = texture.image;
imageMemoryBarrier.subresourceRange = subresourceRange;
imageMemoryBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Insert a memory dependency at the proper pipeline stages that will execute the image layout transition
// Source pipeline stage is host write/read execution (VK_PIPELINE_STAGE_HOST_BIT)
// Destination pipeline stage fragment shader access (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
vkCmdPipelineBarrier(
copyCmd,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
vulkanDevice->flushCommandBuffer(copyCmd, queue, true);
}
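One thing the non-staging path above never checks is the row pitch of the linear image: the memcpy assumes tightly packed rows, but the driver may pad each row, and for a 3-byte texel format like VK_FORMAT_R8G8B8_UNORM it very likely does, while 4-byte RGBA rows often happen to be pitch-aligned already. A sketch of a pitch-aware copy (untested; texWidth, texHeight, bytesPerTexel and srcData are hypothetical stand-ins for the KTX data used above):
// Query the driver-chosen layout of mip 0 of the linear image
VkImageSubresource subresource = {};
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresource.mipLevel = 0;
subresource.arrayLayer = 0;
VkSubresourceLayout layout;
vkGetImageSubresourceLayout(device, mappableImage, &subresource, &layout);
uint8_t *dst;
VK_CHECK_RESULT(vkMapMemory(device, mappableMemory, 0, memReqs.size, 0, (void **)&dst));
dst += layout.offset;
const uint8_t *src = srcData;
for (uint32_t y = 0; y < texHeight; y++) {
    // Copy one tightly packed source row into one (possibly padded) image row
    memcpy(dst, src, texWidth * bytesPerTexel);
    dst += layout.rowPitch;              // driver pitch, often > texWidth * bytesPerTexel
    src += texWidth * bytesPerTexel;     // source rows are tightly packed
}
vkUnmapMemory(device, mappableMemory);
It is also worth calling vkGetPhysicalDeviceFormatProperties first: many implementations do not support sampling VK_FORMAT_R8G8B8_UNORM at all, in which case no copy strategy helps and a staging path with a supported format is the only option.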

generating of mipmaps using vkCmdBlitImage for cubemap textures

What should the parameters of VkImageBlit.dstOffsets and VkImageBlit.srcOffsets be when we are doing dynamic generation of mipmaps?
I am going layer by layer and, within each layer, mip level by mip level, but somewhere it is going wrong; I suspect the offsets most of all. The source data contains all six faces at mip level 0.
for(int j=0; j< bufferCopyRegions.size(); j++) {
for (int32_t i = 1; i < mipLevels; i++)
{
VkImageBlit imageBlit{};
// Source
imageBlit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.srcSubresource.layerCount = 1;
imageBlit.srcSubresource.mipLevel = 0;
imageBlit.srcOffsets[1].x = bitmapInfos[j].width;
imageBlit.srcOffsets[1].y = bitmapInfos[j].height;
imageBlit.srcOffsets[1].z = 1;
// Destination
imageBlit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlit.dstSubresource.layerCount = 1;
imageBlit.dstSubresource.mipLevel = i;
imageBlit.dstOffsets[1].x = (bitmapInfos[j].width >> i) ? int32_t(bitmapInfos[j].width >> i) : 1;
imageBlit.dstOffsets[1].y = (bitmapInfos[j].height >> i) ? int32_t(bitmapInfos[j].height >> i) : 1;
imageBlit.dstOffsets[1].z = 1;
VkImageMemoryBarrier imageMemoryBarrier = {};
imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imageMemoryBarrier.pNext = NULL;
imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageMemoryBarrier.subresourceRange.baseMipLevel = i;
imageMemoryBarrier.subresourceRange.levelCount = 1;
imageMemoryBarrier.subresourceRange.baseArrayLayer = j;
imageMemoryBarrier.subresourceRange.layerCount = 1;
// change layout of current mip level to transfer dest
setImageLayout(imageMemoryBarrier,
blitCmd,
image,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, imageMemoryBarrier.subresourceRange,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_HOST_BIT);
// Do blit operation from previous mip level
vkCmdBlitImage(blitCmd, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageBlit, VK_FILTER_LINEAR);
setImageLayout(imageMemoryBarrier, blitCmd, image, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageMemoryBarrier.subresourceRange,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
}
}
I don't see the baseArrayLayer of imageBlit.srcSubresource and imageBlit.dstSubresource set to j, which is probably your immediate problem.
Your barriers also look wrong to me. Only the top mip level needs to be synchronized with the host, and even there VK_PIPELINE_STAGE_HOST_BIT should not be necessary: there is an exception for vkQueueSubmit saying it performs this kind of synchronization implicitly, as long as the host writes finished before it is called (6.9. Host Write Ordering Guarantees, reiterated in the Note in 6.1.3. Access Types).
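To make that concrete, a sketch of the missing pieces inside the question's loop (same variable names, untested):
// Select the cubemap face on both ends of the blit
imageBlit.srcSubresource.baseArrayLayer = j;
imageBlit.dstSubresource.baseArrayLayer = j;
// And order transfer against transfer rather than against the host, e.g.
// both layout transitions around vkCmdBlitImage should use
// VK_PIPELINE_STAGE_TRANSFER_BIT for the source and destination stages,
// not VK_PIPELINE_STAGE_HOST_BIT.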

Change Variable over Time Interval in Audio Callback

So I want to change a variable within my for loop over a time interval. My audio callback basically runs the following pseudocode:
int start = 0, target = 100;
for (int i = 0; i < frames; i++) {
[object makeSineWave];
[object useNum:start];
if (target > start) {
// Increase start over the span of time frame
}
}
What I want to do is increase start to the target value on a logarithmic scale within a time interval (let's say 1 second to keep it simple). How would I keep track of time within the for loop of the audio callback?
EDIT: I guess I'm trying to make a filter sweep... I imagine it'd be similar to a sine sweep, now that I think about it? More code:
OSStatus RenderTone(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Get Audio Data
AudioData *data = (__bridge AudioData *)inRefCon;
static Float32 phs = 0, sub_phs = 0;
Float32 freq = data->freq;
// Calculate phases
Float32 phs_incr = 2 * M_PI * freq / data->srate;
Float32 sample;
// Buffers
Float32 *bufL = (Float32 *)ioData->mBuffers[0].mData;
Float32 *bufR = (Float32 *)ioData->mBuffers[1].mData;
// Start at 1 hz, target is 500 hz
int start = 1, target = 500;
// Generate Samples
for (UInt32 i = 0; i < inNumberFrames; i++) {
// Sine waveform
sample = sinf(phs);
sample = [data->filter processFilter:sample fc:start];
// change start here using current time?
// should not increase to target immediately, over span of 1 second
bufL[i] = bufR[i] = sample;
// Increment phase
phs += phs_incr;
// Wrap phase
phs = wrapPhase(phs);
}
return noErr;
}
It seems I found out how to do it by looking at CCRMA's STK... a great resource that I wish I had found earlier!
https://ccrma.stanford.edu/software/stk/
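For anyone who lands here without wanting to dig through STK, a minimal sketch of the idea as it would sit in the callback above: the sample counter itself is the clock, so no wall-clock time is needed. (rampSeconds, ratio, and a persistent fc field on AudioData are hypothetical names introduced here.)
// Exponential (log-scale) ramp from start to target over rampSeconds.
// After srate * rampSeconds steps, start * pow(ratio, srate * rampSeconds) == target.
Float32 start = 1.0f, target = 500.0f, rampSeconds = 1.0f;
Float32 ratio = powf(target / start, 1.0f / (data->srate * rampSeconds));
// data->fc persists across callbacks and starts out equal to start
for (UInt32 i = 0; i < inNumberFrames; i++) {
    sample = sinf(phs);
    sample = [data->filter processFilter:sample fc:data->fc];
    if (data->fc < target) {
        data->fc *= ratio;                     // one logarithmic step per sample
        if (data->fc > target) data->fc = target;
    }
    bufL[i] = bufR[i] = sample;
    phs = wrapPhase(phs + phs_incr);
}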

CoreAudio AudioQueue callback function never called, no errors reported

I am trying to implement simple playback from a file, and it appears that my callback function is never called. It doesn't really make sense, because all of the OSStatuses come back 0 and the other numbers all appear correct as well (like the packets-read count returned by AudioFileReadPackets).
Here is the setup:
OSStatus stat;
stat = AudioFileOpenURL(
(CFURLRef)urlpath, kAudioFileReadPermission, 0, &aStreamData->aFile
);
UInt32 dsze = 0;
stat = AudioFileGetPropertyInfo(
aStreamData->aFile, kAudioFilePropertyDataFormat, &dsze, 0
);
stat = AudioFileGetProperty(
aStreamData->aFile, kAudioFilePropertyDataFormat, &dsze, &aStreamData->aDescription
);
stat = AudioQueueNewOutput(
&aStreamData->aDescription, bufferCallback, aStreamData, NULL, NULL, 0, &aStreamData->aQueue
);
aStreamData->pOffset = 0;
for(int i = 0; i < NUM_BUFFERS; i++) {
stat = AudioQueueAllocateBuffer(
aStreamData->aQueue, aStreamData->aDescription.mBytesPerPacket, &aStreamData->aBuffer[i]
);
bufferCallback(aStreamData, aStreamData->aQueue, aStreamData->aBuffer[i]);
}
stat = AudioQueuePrime(aStreamData->aQueue, 0, NULL);
stat = AudioQueueStart(aStreamData->aQueue, NULL);
(Not shown is where I check the value of stat between the calls; it just comes back normal.)
And the callback function:
void bufferCallback(void *uData, AudioQueueRef queue, AudioQueueBufferRef buffer) {
UInt32 bread = 0;
UInt32 pread = buffer->mAudioDataBytesCapacity / player->aStreamData->aDescription.mBytesPerPacket;
OSStatus stat;
stat = AudioFileReadPackets(
player->aStreamData->aFile, false, &bread, NULL, player->aStreamData->pOffset, &pread, buffer->mAudioData
);
buffer->mAudioDataByteSize = bread;
stat = AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
player->aStreamData->pOffset += pread;
}
Where aStreamData is my user data struct (typedefed so I can use it as a class property) and player is a static instance of the controlling Objective-C class. If any other code is wanted please let me know. I am a bit at my wit's end. Printing any of the numbers involved here yields the correct result, including functions in bufferCallback when I call it myself in the allocate loop. It just never gets called thereafter. The start up method returns and nothing happens.
Also, anecdotally, I am using a peripheral device (an MBox Pro 3) to play the sound, which CoreAudio only boots up when it is about to output. That is, if I start iTunes or something, the speakers pop faintly and an LED goes from blinking to solid. The device boots up as usual, so CA is definitely doing something. (I've of course also tried it with the onboard MacBook sound, sans the device.)
I've read other solutions to problems that sound similar and they don't work, such as using multiple buffers, which I am doing now and which doesn't appear to make any difference.
I basically assume I am doing something obviously wrong somehow but not sure what it could be. I've read the relevant documentation, looked at the available code examples and scoured the net a bit for answers and it appears that this is all I need to do and it should just go.
At the very least, is there anything else I can do to investigate?
My first answer was not good enough, so I compiled a minimal example that will play a 2-channel, 16-bit wave file.
The main difference from your code is that I made a property listener listening for play start and stop events.
As for your code, it seems legit at first glance. Two things I will point out, though:
1. It seems you are allocating buffers with too small a buffer size. I have noticed that AudioQueues won't play if the buffers are too small, which seems to fit your problem.
2. Have you verified the properties returned?
Back to my code example:
Everything is hard coded, so it is not exactly good coding practice, but it shows how you can do it.
AudioStreamTest.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
uint32_t bufferSizeInSamples;
AudioFileID file;
UInt32 currentPacket;
AudioQueueRef audioQueue;
AudioQueueBufferRef buffer[3];
AudioStreamBasicDescription audioStreamBasicDescription;
@interface AudioStreamTest : NSObject
- (void)start;
- (void)stop;
@end
AudioStreamTest.m
#import "AudioStreamTest.h"
@implementation AudioStreamTest
- (id)init
{
self = [super init];
if (self) {
bufferSizeInSamples = 441;
file = NULL;
currentPacket = 0;
audioStreamBasicDescription.mBitsPerChannel = 16;
audioStreamBasicDescription.mBytesPerFrame = 4;
audioStreamBasicDescription.mBytesPerPacket = 4;
audioStreamBasicDescription.mChannelsPerFrame = 2;
audioStreamBasicDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioStreamBasicDescription.mFormatID = kAudioFormatLinearPCM;
audioStreamBasicDescription.mFramesPerPacket = 1;
audioStreamBasicDescription.mReserved = 0;
audioStreamBasicDescription.mSampleRate = 44100;
}
return self;
}
- (void)start {
AudioQueueNewOutput(&audioStreamBasicDescription, AudioEngineOutputBufferCallback, (__bridge void *)(self), NULL, NULL, 0, &audioQueue);
AudioQueueAddPropertyListener(audioQueue, kAudioQueueProperty_IsRunning, AudioEnginePropertyListenerProc, NULL);
AudioQueueStart(audioQueue, NULL);
}
- (void)stop {
AudioQueueStop(audioQueue, YES);
AudioQueueRemovePropertyListener(audioQueue, kAudioQueueProperty_IsRunning, AudioEnginePropertyListenerProc, NULL);
}
void AudioEngineOutputBufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
if (file == NULL) return;
UInt32 bytesRead = bufferSizeInSamples * 4;
UInt32 packetsRead = bufferSizeInSamples;
AudioFileReadPacketData(file, false, &bytesRead, NULL, currentPacket, &packetsRead, inBuffer->mAudioData);
inBuffer->mAudioDataByteSize = bytesRead;
currentPacket += packetsRead;
if (bytesRead == 0) {
AudioQueueStop(inAQ, false);
}
else {
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
}
void AudioEnginePropertyListenerProc (void *inUserData, AudioQueueRef inAQ, AudioQueuePropertyID inID) {
//We are only interested in the property kAudioQueueProperty_IsRunning
if (inID != kAudioQueueProperty_IsRunning) return;
//Get the status of the property
UInt32 isRunning = false;
UInt32 size = sizeof(isRunning);
AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &isRunning, &size);
if (isRunning) {
currentPacket = 0;
NSString *fileName = @"/Users/roy/Documents/XCodeProjectsData/FUZZ/03.wav";
NSURL *fileURL = [[NSURL alloc] initFileURLWithPath: fileName];
AudioFileOpenURL((__bridge CFURLRef) fileURL, kAudioFileReadPermission, 0, &file);
for (int i = 0; i < 3; i++){
AudioQueueAllocateBuffer(audioQueue, bufferSizeInSamples * 4, &buffer[i]);
UInt32 bytesRead = bufferSizeInSamples * 4;
UInt32 packetsRead = bufferSizeInSamples;
AudioFileReadPacketData(file, false, &bytesRead, NULL, currentPacket, &packetsRead, buffer[i]->mAudioData);
buffer[i]->mAudioDataByteSize = bytesRead;
currentPacket += packetsRead;
AudioQueueEnqueueBuffer(audioQueue, buffer[i], 0, NULL);
}
}
else {
if (file != NULL) {
AudioFileClose(file);
file = NULL;
for (int i = 0; i < 3; i++) {
AudioQueueFreeBuffer(audioQueue, buffer[i]);
buffer[i] = NULL;
}
}
}
}
-(void)dealloc {
AudioQueueDispose(audioQueue, true);
audioQueue = NULL;
[super dealloc];
}
@end
Lastly, I want to include some research I have done today to test the robustness of AudioQueues.
I have noticed that if you make the AudioQueue buffers too small, it won't play at all. That made me play around a bit to see why.
If I try a buffer size that can hold only 150 samples, I get no sound at all.
If I try a buffer size that can hold 175 samples, it plays the whole song through, but with a lot of distortion. 175 samples amounts to a tad less than 4 ms of audio.
AudioQueue keeps asking for new buffers as long as you keep supplying them, regardless of whether AudioQueue is actually playing your buffers or not.
If you supply a buffer with size 0, the buffer will be lost and the error kAudioQueueErr_BufferEmpty is returned for that enqueue request. You will never see AudioQueue ask you to fill that buffer again. If this happens to the last buffer you have enqueued, AudioQueue will stop asking you to fill any more buffers, and you will not hear any more audio for that session.
To see why AudioQueue is not playing anything with smaller buffer sizes, I made a test to see if my callback is called at all even when there is no sound. The answer is that the callback gets called all the time, as long as AudioQueue is playing and needs data.
So if you keep feeding buffers to the queue, no buffer is ever lost. It doesn't happen. Unless there is an error, of course.
So why is no sound playing?
I tested to see if 'AudioQueueEnqueueBuffer()' returned any errors. It did not. No other errors within my play routine either. The data returned from reading from file is also good.
Everything is normal, buffers are good, data re-enqueued is good, there is just no sound.
So my last test was to slowly increase buffer size till I could hear anything. I finally heard faint and sporadic distortion.
Then it came to me...
It seems the problem is that the system tries to keep the stream in sync with time, so if you enqueue audio and the time at which that audio should have played has already passed, it just skips that part of the buffer. If the buffer size becomes too small, more and more data is dropped or skipped until the audio system is in sync again, which is never if the buffer size is too small. (You can hear this as distortion if you choose a buffer size that is barely large enough to support continuous play.)
If you think about it, it is the only way the audio queue can work, but it is a good realisation when you are clueless like me and "discover" how it really works.
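A practical corollary (my sketch, not part of the answer above): derive the buffer size from a target duration instead of guessing, since for constant-bit-rate PCM the relationship is simply bytes = seconds * sample rate * bytes per frame.
// Sketch: buffer size for a given duration of CBR PCM audio
static UInt32 bufferSizeForSeconds(const AudioStreamBasicDescription *fmt, Float64 seconds) {
    return (UInt32)(seconds * fmt->mSampleRate) * fmt->mBytesPerFrame;
}
// e.g. allocate roughly half a second per buffer:
// AudioQueueAllocateBuffer(audioQueue, bufferSizeForSeconds(&audioStreamBasicDescription, 0.5), &buffer[i]);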
I decided to take a look at this again and was able to solve it by making the buffers larger. I've accepted the answer by @RoyGal since it was their suggestion, but I wanted to provide the actual code that works since I guess others are having the same problem (the question has a few favorites that aren't me at the moment).
One thing I tried was making the packet size larger:
aData->aDescription.mFramesPerPacket = 512; // or some other number
aData->aDescription.mBytesPerPacket = (
aData->aDescription.mFramesPerPacket * aData->aDescription.mBytesPerFrame
);
This does NOT work: it causes AudioQueuePrime to fail with an "AudioConverterNew returned -50" message. I guess it wants mFramesPerPacket to be 1 for PCM.
(I also tried setting the kAudioQueueProperty_DecodeBufferSizeFrames property which didn't seem to do anything. Not sure what it's for.)
The solution seems to be to only allocate the buffer(s) with the specified size:
AudioQueueAllocateBuffer(
aData->aQueue,
aData->aDescription.mBytesPerPacket * N_BUFFER_PACKETS / N_BUFFERS,
&aData->aBuffer[i]
);
And the size has to be sufficiently large. I found the magic number is:
mBytesPerPacket * 1024 / N_BUFFERS
(Where N_BUFFERS is the number of buffers and should be > 1 or playback is choppy.)
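To put numbers on that magic size: for 16-bit stereo PCM at 44.1 kHz, mBytesPerPacket is 4, so with N_BUFFERS = 2 each buffer is 4 * 1024 / 2 = 2048 bytes, i.e. 512 frames, or roughly 11.6 ms of audio per buffer, comfortably above the ~4 ms floor observed in the accepted answer.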
Here is an MCVE demonstrating the issue and solution:
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <AudioToolbox/AudioQueue.h>
#import <AudioToolbox/AudioFile.h>
#define N_BUFFERS 2
#define N_BUFFER_PACKETS 1024
typedef struct AStreamData {
AudioFileID aFile;
AudioQueueRef aQueue;
AudioQueueBufferRef aBuffer[N_BUFFERS];
AudioStreamBasicDescription aDescription;
SInt64 pOffset;
volatile BOOL isRunning;
} AStreamData;
void printASBD(AudioStreamBasicDescription* desc) {
printf("mSampleRate = %d\n", (int)desc->mSampleRate);
printf("mBytesPerPacket = %d\n", desc->mBytesPerPacket);
printf("mFramesPerPacket = %d\n", desc->mFramesPerPacket);
printf("mBytesPerFrame = %d\n", desc->mBytesPerFrame);
printf("mChannelsPerFrame = %d\n", desc->mChannelsPerFrame);
printf("mBitsPerChannel = %d\n", desc->mBitsPerChannel);
}
void bufferCallback(
void *vData, AudioQueueRef aQueue, AudioQueueBufferRef aBuffer
) {
AStreamData* aData = (AStreamData*)vData;
UInt32 bRead = 0;
UInt32 pRead = (
aBuffer->mAudioDataBytesCapacity / aData->aDescription.mBytesPerPacket
);
OSStatus stat;
stat = AudioFileReadPackets(
aData->aFile, false, &bRead, NULL, aData->pOffset, &pRead, aBuffer->mAudioData
);
if(stat != 0) {
printf("AudioFileReadPackets returned %d\n", stat);
}
if(pRead == 0) {
aData->isRunning = NO;
return;
}
aBuffer->mAudioDataByteSize = bRead;
stat = AudioQueueEnqueueBuffer(aQueue, aBuffer, 0, NULL);
if(stat != 0) {
printf("AudioQueueEnqueueBuffer returned %d\n", stat);
}
aData->pOffset += pRead;
}
AStreamData* beginPlayback(NSURL* path) {
static AStreamData* aData;
aData = malloc(sizeof(AStreamData));
OSStatus stat;
stat = AudioFileOpenURL(
(CFURLRef)path, kAudioFileReadPermission, 0, &aData->aFile
);
printf("AudioFileOpenURL returned %d\n", stat);
UInt32 dSize = 0;
stat = AudioFileGetPropertyInfo(
aData->aFile, kAudioFilePropertyDataFormat, &dSize, 0
);
printf("AudioFileGetPropertyInfo returned %d\n", stat);
stat = AudioFileGetProperty(
aData->aFile, kAudioFilePropertyDataFormat, &dSize, &aData->aDescription
);
printf("AudioFileGetProperty returned %d\n", stat);
printASBD(&aData->aDescription);
stat = AudioQueueNewOutput(
&aData->aDescription, bufferCallback, aData, NULL, NULL, 0, &aData->aQueue
);
printf("AudioQueueNewOutput returned %d\n", stat);
aData->pOffset = 0;
for(int i = 0; i < N_BUFFERS; i++) {
// change YES to NO to reproduce the broken (too-small) buffer allocation
if(YES) {
stat = AudioQueueAllocateBuffer(
aData->aQueue,
aData->aDescription.mBytesPerPacket * N_BUFFER_PACKETS / N_BUFFERS,
&aData->aBuffer[i]
);
} else {
stat = AudioQueueAllocateBuffer(
aData->aQueue,
aData->aDescription.mBytesPerPacket,
&aData->aBuffer[i]
);
}
printf(
"AudioQueueAllocateBuffer returned %d for aBuffer[%d] with capacity %d\n",
stat, i, aData->aBuffer[i]->mAudioDataBytesCapacity
);
bufferCallback(aData, aData->aQueue, aData->aBuffer[i]);
}
UInt32 numFramesPrepared = 0;
stat = AudioQueuePrime(aData->aQueue, 0, &numFramesPrepared);
printf("AudioQueuePrime returned %d with %d frames prepared\n", stat, numFramesPrepared);
stat = AudioQueueStart(aData->aQueue, NULL);
printf("AudioQueueStart returned %d\n", stat);
UInt32 pSize = sizeof(UInt32);
UInt32 isRunning;
stat = AudioQueueGetProperty(
aData->aQueue, kAudioQueueProperty_IsRunning, &isRunning, &pSize
);
printf("AudioQueueGetProperty returned %d\n", stat);
aData->isRunning = !!isRunning;
return aData;
}
void endPlayback(AStreamData* aData) {
OSStatus stat = AudioQueueStop(aData->aQueue, NO);
printf("AudioQueueStop returned %d\n", stat);
}
NSString* getPath() {
// change NO to YES and enter path to hard code
if(NO) {
return #"";
}
char input[512];
printf("Enter file path: ");
scanf("%[^\n]", input);
return [[NSString alloc] initWithCString:input encoding:NSASCIIStringEncoding];
}
int main(int argc, const char* argv[]) {
NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
NSURL* path = [NSURL fileURLWithPath:getPath()];
AStreamData* aData = beginPlayback(path);
if(aData->isRunning) {
do {
printf("Queue is running...\n");
[NSThread sleepForTimeInterval:1.0];
} while(aData->isRunning);
endPlayback(aData);
} else {
printf("Playback did not start\n");
}
[pool drain];
return 0;
}

ExtAudioFileRead gives different result in iOS 5

I'm trying to extract Linear PCM data from a MP3 file.
Before iOS 5, I could do it successfully using AudioToolbox framework, specifically ExtAudioFileRead function.
However, in iOS 5 the ExtAudioFileRead function gives completely different result from that in iOS 4.
First, it cannot read all the packets in the source MP3 file.
For example, it reads only 1637 packets while the source MP3 file has 2212 packets in total.
Second, the PCM values obtained from the function are completely different from those obtained in iOS 4.
I can't figure out what I did wrong :(
The same framework, the same function, and the same code... but completely different results?
I suspect it is a bug in iOS 5, so I already reported the problem to Apple.
But Apple has not answered my bug report for 2 weeks!
Here's the code that causes the problem.
After executing the code, I expect to have the correct PCM data in pcmBuffer.
In iOS 4, the code gives the result I expected.
But in iOS 5, the result is completely different and wrong.
Please, somebody help me!
OSStatus status;
ExtAudioFileRef fileRef;
CFURLRef fileURL = (CFURLRef)[NSURL fileURLWithPath:filePath];
status = ExtAudioFileOpenURL((CFURLRef)fileURL, &fileRef);
AudioStreamBasicDescription dataFormat;
dataFormat.mSampleRate = SAMPLE_RATE;
dataFormat.mFormatID = kAudioFormatLinearPCM;
dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
dataFormat.mFramesPerPacket = 1;
dataFormat.mChannelsPerFrame = 1;
dataFormat.mBitsPerChannel = 16;
dataFormat.mBytesPerPacket = 2;
dataFormat.mBytesPerFrame = 2;
UInt32 propDataSize;
AudioStreamBasicDescription originalDataFormat;
propDataSize = (UInt32)sizeof(originalDataFormat);
status = ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileDataFormat, &propDataSize, &originalDataFormat);
SInt64 numPackets;
propDataSize = sizeof(numPackets);
status = ExtAudioFileGetProperty(fileRef, kExtAudioFileProperty_FileLengthFrames, &propDataSize, &numPackets);
propDataSize = (UInt32)sizeof(dataFormat);
status = ExtAudioFileSetProperty(fileRef, kExtAudioFileProperty_ClientDataFormat, propDataSize, &dataFormat);
numPackets = (SInt64)numPackets / (SInt64)(originalDataFormat.mSampleRate / SAMPLE_RATE);
size_t bufferSize = (size_t)(numPackets * sizeof(SInt16));
SInt16 *pcmBuffer = (SInt16 *)malloc(bufferSize);
AudioBufferList bufList;
bufList.mNumberBuffers = 1;
bufList.mBuffers[0].mNumberChannels = 1;
bufList.mBuffers[0].mDataByteSize = bufferSize;
bufList.mBuffers[0].mData = pcmBuffer;
ExtAudioFileSeek(fileRef, 0);
UInt32 totalFramesRead = 0;
do {
UInt32 framesRead = numPackets - totalFramesRead;
bufList.mBuffers[0].mData = pcmBuffer + (totalFramesRead * (sizeof(SInt16)));
ExtAudioFileRead(fileRef, &framesRead, &bufList);
totalFramesRead += framesRead;
if(framesRead == 0) {
break;
}
NSLog(#"read %lu frames\n", framesRead);
} while (totalFramesRead < numPackets);
int totalPackets = totalFramesRead;
status = ExtAudioFileDispose(fileRef);
NSLog(#"numPackets : %lld, totalPackets : %d", numPackets, totalPackets);
Ouch. I noticed that the number is different if the original sampling rate of the song is different. Back to square one.
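An editorial note on that last observation: kExtAudioFileProperty_FileLengthFrames is reported at the file's native sample rate, not at the client format's rate, so the expected frame count after conversion has to be scaled by the rate ratio, which the code above does by division. A sketch of the equivalent calculation, using the question's variables:
// fileFrames is the raw kExtAudioFileProperty_FileLengthFrames value,
// measured at originalDataFormat.mSampleRate rather than at SAMPLE_RATE
SInt64 fileFrames = numPackets; // taken before the scaling step in the code above
double rateRatio = (double)SAMPLE_RATE / originalDataFormat.mSampleRate;
SInt64 expectedClientFrames = (SInt64)(fileFrames * rateRatio + 0.5);
Beyond that, it is plausible (though unconfirmed here) that the iOS 5 MP3 decoder trims encoder delay (priming and remainder packets) where iOS 4 did not, which would also shrink the packet count; comparing against an uncompressed CAF or WAV of known length would separate the two effects.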