How can I save a file using AVAudioPlayerNode? - objective-c

I apply some effects to audio using AVAudioPlayerNode, and then I want to save the result. I tried doing it like this:
Output = engine.outputNode;
AudioStreamBasicDescription asbd;
memset(&asbd, 0, sizeof(asbd));
asbd.mSampleRate = SAMPLE_RATE;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
asbd.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
asbd.mBitsPerChannel = 16;
asbd.mChannelsPerFrame = 2;
asbd.mFramesPerPacket = 1;
asbd.mBytesPerFrame = 4;
asbd.mBytesPerPacket = 4;
AudioFileID OutputFile;
CFStringRef inputFile = (__bridge CFStringRef)(save);
CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, inputFile, kCFURLPOSIXPathStyle,false);
CheckError (AudioFileCreateWithURL(inputFileURL, kAudioFileWAVEType, &asbd, kAudioFileFlags_EraseFile, &OutputFile), "err");
CFRelease(inputFileURL);
float y = 6871947*16;//audio.length;
myData1 = [NSData dataWithBytesNoCopy:(__bridge void * _Nonnull)(Output) length:y freeWhenDone:NO];
NSUInteger length1 = [myData1 length]-4096;
UInt32 lgt = (UInt32) length1/4;
UInt16 *cdata = (UInt16*) malloc (length1);
[myData1 getBytes:(void*)cdata range:NSMakeRange(0, length1)];
CheckError ( AudioFileWritePackets(OutputFile, TRUE, lgt, NULL, 0, &lgt, cdata), "err");
AudioFileClose(OutputFile);
But the saved file is wrong. I think that wrapping Output in an NSData is the wrong step, but I don't know another way. How can I save the new file?
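One commonly used alternative (a minimal sketch, not from the original post; `engine` and `outputURL` are assumed names for the AVAudioEngine and the destination NSURL) is to install a tap on the engine's main mixer and write each rendered buffer with AVAudioFile, instead of reinterpreting the output node as raw bytes:
// Requires <AVFoundation/AVFoundation.h>. Sketch only: tap the main mixer
// and append each rendered buffer to an AVAudioFile.
AVAudioFormat *tapFormat = [engine.mainMixerNode outputFormatForBus:0];
NSError *fileError = nil;
AVAudioFile *outFile = [[AVAudioFile alloc] initForWriting:outputURL
                                                  settings:tapFormat.settings
                                                     error:&fileError];
[engine.mainMixerNode installTapOnBus:0
                           bufferSize:4096
                               format:tapFormat
                                block:^(AVAudioPCMBuffer *buffer, AVAudioTime *when) {
    NSError *writeError = nil;
    [outFile writeFromBuffer:buffer error:&writeError]; // appends PCM frames
}];
// ...play through the AVAudioPlayerNode, then [engine.mainMixerNode removeTapOnBus:0] when done.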

Related

Using ExtAudioFileWriteAsync() in a callback function. Can't get it to run

Just can't seem to get very far in Core Audio. My goal is to write captured audio data from an instrument unit to a file. I have set up a call to a callback function on an instrument unit with this:
CheckError(AudioUnitAddRenderNotify(player->instrumentUnit,
                                    MyRenderProc,
                                    &player),
           "AudioUnitAddRenderNotify Failed");
I set up the file and AudioStreamBasicDescription with this:
#define FILENAME @"output_IV.aif"
NSString *fileName = FILENAME; // [NSString stringWithFormat:FILENAME_FORMAT, hz];
NSString *filePath = [[[NSFileManager defaultManager] currentDirectoryPath] stringByAppendingPathComponent: fileName];
NSURL *fileURL = [NSURL fileURLWithPath: filePath];
NSLog(@"path: %@", fileURL);
AudioStreamBasicDescription asbd;
memset(&asbd, 0, sizeof(asbd));
asbd.mSampleRate = 44100.0;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
asbd.mChannelsPerFrame = 2; // CHANGED FROM 1 (STEREO)
asbd.mFramesPerPacket = 1;
asbd.mBitsPerChannel = 16;
asbd.mBytesPerFrame = 4;
asbd.mBytesPerPacket = 4;
CheckError(ExtAudioFileCreateWithURL((__bridge CFURLRef)fileURL, kAudioFileAIFFType, &asbd, NULL, kAudioFileFlags_EraseFile, &testRecordFile), "ExtAudioFileCreateWithURL failed");
CheckError(ExtAudioFileSetProperty(testRecordFile, kExtAudioFileProperty_ClientDataFormat, (UInt32)sizeof(asbd), &asbd), "ExtAudioFileSetProperty failed");
CheckError(ExtAudioFileWriteAsync(testRecordFile, 0, NULL), "ExtAudioFileWriteAsync 1st time failed");
I verified that the file does get created. testRecordFile is defined globally (it's the only way I could get things to run at the moment):
ExtAudioFileRef testRecordFile;
My callback function is:
OSStatus MyRenderProc(void *inRefCon,
                      AudioUnitRenderActionFlags *ioActionFlags,
                      const AudioTimeStamp *inTimeStamp,
                      UInt32 inBusNumber,
                      UInt32 inNumberFrames,
                      AudioBufferList *ioData)
{
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender) {
        static int TEMP_kAudioUnitRenderAction_PostRenderError = (1 << 8);
        if (!(*ioActionFlags & TEMP_kAudioUnitRenderAction_PostRenderError)) {
            CheckError(ExtAudioFileWriteAsync(testRecordFile, inNumberFrames, ioData),
                       "ExtAudioFileWriteAsync failed");
        }
    }
    return noErr;
}
When I run this the program pinwheels and goes into debugger mode (lldb) on the ExtAudioFileWriteAsync call. inNumberFrames = 512 and I have verified that I am getting stereo channels of Float32 audio data in ioData.
What am I missing here?
First, your code is still a bit complicated and touches some of the "dark corners" of Core Audio and Obj-C. It is a safer bet to first make sure that everything works as intended in plain C on the real-time thread. Once you have debugged that part of the code, you can easily add as much Obj-C elegance as needed.
Ignoring possible endianness and file-format conversion issues for simplicity, there is still one issue you have to resolve, either automatically using API utilities, or "manually":
AFAIK, the data format for ExtAudioFileWriteAsync() must be interleaved, while the stream format for your AUGraph is not. Assuming we don't deal with endianness and format conversion here, this is how you can fix it manually (single-channel example). In case your asbd stream format is non-interleaved stereo, you interleave the data in your buffer like this: LRLRLRLRLR...
OSStatus MyRenderProc(void *inRefCon,
                      AudioUnitRenderActionFlags *ioActionFlags,
                      const AudioTimeStamp *inTimeStamp,
                      UInt32 inBusNumber,
                      UInt32 inNumberFrames,
                      AudioBufferList *ioData)
{
    AudioBufferList bufferList;
    Float32 samples[inNumberFrames + inNumberFrames];
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = samples;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = (inNumberFrames + inNumberFrames) * sizeof(Float32);
    Float32 *data = (Float32 *)ioData->mBuffers[0].mData;
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender) {
        static int TEMP_kAudioUnitRenderAction_PostRenderError = (1 << 8);
        if (!(*ioActionFlags & TEMP_kAudioUnitRenderAction_PostRenderError)) {
            for (UInt32 i = 0; i < inNumberFrames; i++)
                samples[i + i] = samples[i + i + 1] = data[i]; // copy buffer[0] to L & R
            ExtAudioFileWriteAsync(testRecordFile, inNumberFrames, &bufferList);
        }
    }
    return noErr;
}
This is just one example to show how it works. By studying asbd.mFormatFlags and setting the proper format in:
ExtAudioFileSetProperty(testRecordFile,
                        kExtAudioFileProperty_ClientDataFormat,
                        sizeof(asbd),
                        &asbd);
you can achieve this more elegantly, but that exceeds the scope of this post by far.
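For illustration, a hedged sketch of that more elegant route (not from the original answer; it assumes the graph delivers non-interleaved Float32 stereo at 44.1 kHz): describe the render thread's own format as the client data format, and let ExtAudioFile do the interleaving and conversion on write.
// Sketch (assumed format): tell ExtAudioFile what the callback delivers,
// so ExtAudioFileWriteAsync can take ioData directly and convert itself.
AudioStreamBasicDescription clientFormat;
memset(&clientFormat, 0, sizeof(clientFormat));
clientFormat.mSampleRate       = 44100.0;
clientFormat.mFormatID         = kAudioFormatLinearPCM;
clientFormat.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
clientFormat.mChannelsPerFrame = 2;
clientFormat.mFramesPerPacket  = 1;
clientFormat.mBitsPerChannel   = 32;
clientFormat.mBytesPerFrame    = sizeof(Float32); // per channel when non-interleaved
clientFormat.mBytesPerPacket   = sizeof(Float32);
CheckError(ExtAudioFileSetProperty(testRecordFile,
                                   kExtAudioFileProperty_ClientDataFormat,
                                   sizeof(clientFormat),
                                   &clientFormat),
           "set client data format failed");
// The callback's write then reduces to:
//     ExtAudioFileWriteAsync(testRecordFile, inNumberFrames, ioData);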
Here's the working callback for a 16-bit linear big-endian stereo AIF file:
OSStatus MyRenderProc(void *inRefCon,
                      AudioUnitRenderActionFlags *ioActionFlags,
                      const AudioTimeStamp *inTimeStamp,
                      UInt32 inBusNumber,
                      UInt32 inNumberFrames,
                      AudioBufferList *ioData)
{
    SInt16 samples[inNumberFrames + inNumberFrames];
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = samples;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = (inNumberFrames + inNumberFrames) * sizeof(SInt16);
    Float32 *leftData = (Float32 *)ioData->mBuffers[0].mData;
    Float32 *rightData = (Float32 *)ioData->mBuffers[1].mData;
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender) {
        static int TEMP_kAudioUnitRenderAction_PostRenderError = (1 << 8);
        if (!(*ioActionFlags & TEMP_kAudioUnitRenderAction_PostRenderError)) {
            for (UInt32 i = 0; i < inNumberFrames; i++) {
                samples[i + i] = CFSwapInt16HostToBig((SInt16)SHRT_MAX * leftData[i]);
                samples[i + i + 1] = CFSwapInt16HostToBig((SInt16)SHRT_MAX * rightData[i]);
            }
            CheckError(ExtAudioFileWriteAsync(testRecordFile, inNumberFrames, &bufferList),
                       "ExtAudioFileWriteAsync failed");
        }
    }
    return noErr;
}

Core Audio: how to generate a square wave with two channels (stereo)

This is the code from Learning Core Audio (http://www.amazon.com/Learning-Core-Audio-Hands-On-Programming/dp/0321636848).
Just like the book says, generating stereo should set asbd.mBitsPerChannel to 8 and asbd.mChannelsPerFrame to 2. But the audio I get has only one channel. I don't know what's wrong with the code; please help me. Thanks
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define SAMPLE_RATE 44100
#define DURATION 5.0
#define FILENAME_FORMAT @"%0.3f-square.aif"
int main(int argc, const char * argv[])
{
    @autoreleasepool {
        if (argc < 2) {
            printf("Usage: CAToneFileGenerator n\n(where n is tone in Hz)");
            return -1;
        }
        double hz = atof(argv[1]);
        assert(hz > 0);
        NSLog(@"generating %f hz tone", hz);
        NSString *fileName = [NSString stringWithFormat:FILENAME_FORMAT, hz];
        NSString *filePath = [[[NSFileManager defaultManager] currentDirectoryPath] stringByAppendingPathComponent:fileName];
        NSLog(@"%@", filePath);
        NSURL *fileURL = [NSURL fileURLWithPath:filePath];
        AudioStreamBasicDescription asbd;
        memset(&asbd, 0, sizeof(asbd));
        asbd.mSampleRate = SAMPLE_RATE;
        asbd.mFormatID = kAudioFormatLinearPCM;
        asbd.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        asbd.mBitsPerChannel = 8;
        asbd.mChannelsPerFrame = 2;
        asbd.mFramesPerPacket = 1;
        asbd.mBytesPerFrame = 2;
        asbd.mBytesPerPacket = 2;
        AudioFileID audioFile;
        OSStatus audioErr = noErr;
        audioErr = AudioFileCreateWithURL((CFURLRef)fileURL, kAudioFileAIFFType, &asbd, kAudioFileFlags_EraseFile, &audioFile);
        assert(audioErr == noErr);
        long maxSampleCount = SAMPLE_RATE * DURATION;
        long sampleCount = 0;
        UInt32 bytesToWrite = 2;
        double wavelengthInSamples = SAMPLE_RATE / hz;
        while (sampleCount < maxSampleCount) {
            for (int i = 0; i < wavelengthInSamples; i++) {
                SInt16 sample;
                if (i < wavelengthInSamples / 2) {
                    sample = CFSwapInt16HostToBig(SHRT_MAX);
                } else {
                    sample = CFSwapInt16HostToBig(SHRT_MIN);
                }
                audioErr = AudioFileWriteBytes(audioFile, false, sampleCount * 2, &bytesToWrite, &sample);
                assert(audioErr == noErr);
                sampleCount++;
            }
        }
        audioErr = AudioFileClose(audioFile);
        assert(audioErr == noErr);
        NSLog(@"wrote %ld samples", sampleCount);
    }
    return 0;
}
Just changing the ASBD from the book code doesn't magically fix everything. You haven't accounted for how you're writing the samples to the file. Also, 8-bit is going to sound like ass.
Go back to mBitsPerChannel = 16, and then account for the fact you're writing two channels per frame, meaning that mBytesPerFrame and mBytesPerPacket will now be 4 (they were 2 in the book). Think about why this is.
Then you should just be able to add a second call to AudioFileWriteBytes() -- or do a loop where you count over mChannelsPerFrame -- right after the first one. But you'll have to account for the different offsets in the file, since you're writing 4 bytes each pass instead of 2. I think this is right:
audioErr = AudioFileWriteBytes(audioFile, false, sampleCount*4, &bytesToWrite, &sample); // left
audioErr = AudioFileWriteBytes(audioFile, false, (sampleCount*4)+2, &bytesToWrite, &sample); // right
You need to figure out some of this stuff on your own in order for it to sink in.
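For readers following along, a hedged sketch of where those pieces land (only the changed lines; the rest of the book's program stays as-is):
// Sketch: 16-bit stereo means each frame occupies 4 bytes in the file
// (2 bytes per channel), so both the ASBD and the write offsets change.
asbd.mBitsPerChannel   = 16;
asbd.mChannelsPerFrame = 2;
asbd.mBytesPerFrame    = 4;
asbd.mBytesPerPacket   = 4;
// ...inside the sample loop, write the same value to both channels:
audioErr = AudioFileWriteBytes(audioFile, false, sampleCount * 4,     &bytesToWrite, &sample); // left
assert(audioErr == noErr);
audioErr = AudioFileWriteBytes(audioFile, false, sampleCount * 4 + 2, &bytesToWrite, &sample); // right
assert(audioErr == noErr);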

Dividing file into 10MB chunks

I have a script that divides a file into 10 MB chunks. I hadn't had a problem with this script until I tried it on a 6 GB file. I'm getting negative values in the ranges even though they are uint64_t. Any suggestions on where the error is?
NSData *conData = [NSURLConnection sendSynchronousRequest:fileSizeRequest returningResponse:&response error:&error];
if (conData)
{
    NSDictionary *headers = [response allHeaderFields];
    NSString *fileSizeString = [headers objectForKey:@"Content-Length"];
    uint64_t fileSize = strtoull([fileSizeString UTF8String], NULL, 0);
    self.size += fileSize;
    uint64_t amountOfRanges = fileSize / 10485760;
    for (int i = 0; i <= amountOfRanges; i++)
    {
        uint64_t rangeMin = 0;
        uint64_t rangeMax = 0;
        if (i != amountOfRanges)
        {
            rangeMin = i * 10485760;
            rangeMax = (i + 1) * 10485760 - 1;
        }
        else
        {
            if (i == 0)
            {
                rangeMin = 0;
                rangeMax = fileSize - 1;
            }
            else
            {
                rangeMin = i * 10485760;
                rangeMax = i * 10485760 - 1 + (fileSize - rangeMin);
            }
        }
    }
}
You have a problem with expressions such as this:
rangeMin = i * 10485760;
Note that i is an int and 10485760 is an int literal, so the resulting int expression can easily overflow. You should ideally make i a uint64_t and/or use unsigned long long literals, e.g.
rangeMin = i * 10485760ULL;
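To see why, a small self-contained illustration (not from the original answer; the printed values assume a typical 32-bit int with two's-complement wraparound):
#include <stdio.h>

int main(void) {
    // A 6 GB file gives fileSize / 10485760 = 600+ ranges, and with
    // 32-bit int arithmetic the product i * 10485760 wraps at i = 205
    // (205 * 10485760 = 2,149,580,800 > INT_MAX = 2,147,483,647).
    int i = 205;
    long long bad  = i * 10485760;    // int * int overflows before widening
    long long good = i * 10485760ULL; // ULL literal forces 64-bit math
    printf("%lld vs %lld\n", bad, good); // typically -2145386496 vs 2149580800
    return 0;
}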

audio unit playing m4a files

I've been working on this task for 12 days and I can't find any solution. Pleaaaaase help.
I'm supposed to load about 80 m4a files and play some of them with an AUGraph that contains mixer and remoteIO units. This is how I load the files:
OSStatus result;
for (int i = 0; i < [filePaths count]; i++) {
    NSMutableArray *linearr = [[NSMutableArray alloc] init];
    for (int j = 0; j < [[filePaths objectAtIndex:i] count]; j++) {
        NSString *str = [[filePaths objectAtIndex:i] objectAtIndex:j];
        CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)[str cStringUsingEncoding:[NSString defaultCStringEncoding]], strlen([str cStringUsingEncoding:[NSString defaultCStringEncoding]]), false);
        ExtAudioFileRef audiofile;
        ExtAudioFileOpenURL(audioFileURL, &audiofile);
        assert(audiofile);
        OSStatus err;
        AudioStreamBasicDescription fileFormat;
        UInt32 size = sizeof(fileFormat);
        err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &size, &fileFormat);
        AudioFileID aFile;
        //size = sizeof(aFile);
        PropertySize = sizeof(PacketsToRead);
        err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &PropertySize, &aFile);
        AudioFileTypeID fileType;
        PropertySize = sizeof(fileType);
        err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &PropertySize, &fileType);
        AudioStreamBasicDescription clientFormat;
        bzero(&clientFormat, sizeof(clientFormat));
        clientFormat.mChannelsPerFrame = 2;
        clientFormat.mBytesPerFrame = 4;
        clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
        clientFormat.mFramesPerPacket = 1;
        clientFormat.mBitsPerChannel = 32;
        clientFormat.mFormatID = kAudioFormatLinearPCM;
        clientFormat.mSampleRate = 44100.00;
        clientFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved; //kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
        err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
        SInt64 numFrames = 0;
        PropertySize = sizeof(numFrames);
        err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &PropertySize, &numFrames);
        NSNumber *pc = [NSNumber numberWithLongLong:numFrames];
        [[packetCount objectAtIndex:i] replaceObjectAtIndex:j withObject:pc];
        // create the buffers for reading in data
        bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
        bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
        for (int ii = 0; ii < bufferList->mNumberBuffers; ++ii) {
            bufferList->mBuffers[ii].mDataByteSize = sizeof(float) * numFrames;
            bufferList->mBuffers[ii].mNumberChannels = 2;
            bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
        }
        UInt32 rFrames = 0;
        rFrames = (UInt32)numFrames;
        err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
        [linearr addObject:[NSData dataWithBytes:bufferList->mBuffers[1].mData length:numFrames]];
        err = ExtAudioFileDispose(audiofile);
    }
    [audioData addObject:linearr];
}
and this is how I play it:
UInt32 *buslist;
buslist = (UInt32 *)[[[audioData objectAtIndex:0] objectAtIndex:4] bytes];
In the render callback function:
for (int i = 0; i < ioData->mNumberBuffers; i++) {
    UInt32 *au3 = au->mBuffers[0].mData;
    AudioBuffer buffer = ioData->mBuffers[i];
    UInt32 *frameBuffer = buffer.mData;
    for (int j = 0; j < inNumberFrames; j++) {
        frameBuffer[j] = buslist[counter];
        if (counter >= 529200)
            counter = 0;
        else
            counter++;
    }
}
Now when I play the sound, the first part plays at double speed, and then the second part is only distortion.
I was having the exact same problem, I think: the incoming sound was at a lower sample rate, so the array I allocated wasn't big enough for the higher sample rate of the AUGraph, and playback was too fast and short.
Make sure you allocate an array to read the file, something like this:
sarray->frames = totalFramesInFile * graphSampleRate / fileAudioFormat.mSampleRate;
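In the asker's code above, that would mean sizing the read buffers from the converted frame count rather than the raw file length. A hedged sketch (graphSampleRate is an assumed variable holding the AUGraph's output sample rate; audiofile, fileFormat, and bufferList are the question's own names):
// Sketch: ExtAudioFileRead delivers frames already resampled to the
// client format, so allocate for the converted count, not the file's.
SInt64 framesInFile = 0;
UInt32 propSize = sizeof(framesInFile);
ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames,
                        &propSize, &framesInFile);
// fileFormat was fetched earlier via kExtAudioFileProperty_FileDataFormat
SInt64 convertedFrames = (SInt64)(framesInFile * graphSampleRate / fileFormat.mSampleRate);
for (int ii = 0; ii < bufferList->mNumberBuffers; ++ii) {
    bufferList->mBuffers[ii].mDataByteSize = (UInt32)(sizeof(float) * convertedFrames); // 4-byte client samples
    bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
}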

ExtAudioFileWrite puzzler (kExtAudioFileError_InvalidOperationOrder error)

The following code skips the first second of audio in a PCM CAF file and removes the last 5 seconds, writing to a temp file (which will be 6 seconds shorter than the input). Every time through the loop, ExtAudioFileWrite produces a kExtAudioFileError_InvalidOperationOrder. What am I doing wrong?
NSString *destURLString = [self.track.location absoluteString];
destURLString = [destURLString substringToIndex:([destURLString length] - 4)]; // remove .caf
destURLString = [NSString stringWithFormat:@"%@TMP.caf", destURLString]; // add tmp.caf
NSURL *destinationURL = [NSURL URLWithString:destURLString];
ExtAudioFileRef inputFile = NULL;
ExtAudioFileRef outputFile = NULL;
AudioStreamBasicDescription destFormat;
destFormat.mFormatID = kAudioFormatLinearPCM;
destFormat.mFormatFlags = kAudioFormatFlagsCanonical;
destFormat.mSampleRate = 22000;
destFormat.mFormatFlags = 0;
destFormat.mBytesPerPacket = 2;
destFormat.mFramesPerPacket = 1;
destFormat.mBytesPerFrame = 2;
destFormat.mChannelsPerFrame = 1;
destFormat.mBitsPerChannel = 16;
destFormat.mReserved = 0;
ExtAudioFileCreateWithURL((CFURLRef)destinationURL, kAudioFileCAFType, &destFormat, NULL, kAudioFileFlags_EraseFile, &outputFile);
OSStatus fileStatus = ExtAudioFileOpenURL((CFURLRef)track.location, &inputFile);
//AudioFileID fileID;
//OSStatus fileStatus = AudioFileOpenURL((CFURLRef)track.location, kAudioFileReadPermission, 0, &fileID);
//ExtAudioFileWrapAudioFileID(fileID, true, &inputFile);
OSStatus fileStatus2 = ExtAudioFileOpenURL((CFURLRef)destinationURL, &outputFile);
//NSLog(@"open status: %i", fileStatus2);
// find out how many frames long this file is
SInt64 length = 0;
UInt32 dataSize2 = (UInt32)sizeof(length);
OSStatus propStatus2 = ExtAudioFileGetProperty(inputFile, kExtAudioFileProperty_FileLengthFrames, &dataSize2, &length);
AudioStreamBasicDescription clientFormat;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = 22000;
clientFormat.mFormatFlags = kAudioFormatFlagsCanonical;
clientFormat.mBitsPerChannel = 16;
clientFormat.mChannelsPerFrame = 1;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerPacket = 2;
clientFormat.mBytesPerFrame = 2;
destFormat.mReserved = 0;
UInt32 size = sizeof(clientFormat);
// set the intermediate format to canonical on the source file for conversion (?)
OSStatus setpropstatus = ExtAudioFileSetProperty(inputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
OSStatus setpropstatusout = ExtAudioFileSetProperty(outputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
//UInt32 size = sizeof(destFormat);
//OSStatus setpropstatus = ExtAudioFileSetProperty(inputFile, kAudioFilePropertyDataFormat, size, &destFormat);
//NSLog(@"set prop status in %i", setpropstatus);
//NSLog(@"set prop status out %i", setpropstatusout);
OSStatus seekStatus = ExtAudioFileSeek(inputFile, (SInt64)22000); // skip one second of audio
NSLog(@"seekstatus %i", seekStatus);
SInt64 newLength = length - (5 * 22000); // shorten by 5 seconds' worth of frames
NSLog(@"length: %i frames", length);
UInt8 *buffer = malloc(65536); // 64K
UInt32 totalFramecount = 0;
while (true) {
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mData = buffer; // pointer to buffer of audio data
    bufferList.mBuffers[0].mDataByteSize = 65536; // number of bytes in the buffer
    UInt32 frameCount = 65536 / 2; // 2 bytes per frame
    // Read a chunk of input
    OSStatus status = ExtAudioFileRead(inputFile, &frameCount, &bufferList);
    totalFramecount += frameCount;
    NSLog(@"read status %i", status);
    //NSLog(@"loaded %f KB of data in %i frames", frameCount * 2 / 1024.0, frameCount);
    NSLog(@"loaded %i frames and stopping at %i", totalFramecount, newLength);
    if (!frameCount || totalFramecount >= newLength) {
        // termination condition
        break;
    }
    OSStatus writeStatus = ExtAudioFileWrite(outputFile, frameCount, &bufferList);
    NSLog(@"ws: %i", writeStatus);
}
free(buffer);
ExtAudioFileDispose(inputFile);
ExtAudioFileDispose(outputFile);
It turns out ExtAudioFileCreateWithURL returns a file that is already open, so the call to ExtAudioFileOpenURL was not needed, even though it returns successfully. I removed it and everything works correctly.
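In code form, a short sketch of that fix (same destFormat and destinationURL as above):
// Sketch: ExtAudioFileCreateWithURL hands back an already-open, writable
// ExtAudioFileRef, so the second open of the destination is removed.
ExtAudioFileRef outputFile = NULL;
OSStatus createStatus = ExtAudioFileCreateWithURL((CFURLRef)destinationURL,
                                                  kAudioFileCAFType,
                                                  &destFormat,
                                                  NULL,
                                                  kAudioFileFlags_EraseFile,
                                                  &outputFile);
// (no ExtAudioFileOpenURL(destinationURL, &outputFile) call here)
// ...then set kExtAudioFileProperty_ClientDataFormat and read/write as before.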