Audio unit playing m4a files - Objective-C

I've been working on this task for 12 days and I can't find a solution, so please help.
I'm supposed to load about 80 m4a files and play some of them through an AUGraph that contains mixer and remote I/O units. This is how I load the files:
OSStatus result;
for (int i = 0; i < [filePaths count]; i++) {
    NSMutableArray *linearr = [[NSMutableArray alloc] init];
    for (int j = 0; j < [[filePaths objectAtIndex:i] count]; j++) {
        NSString *str = [[filePaths objectAtIndex:i] objectAtIndex:j];
        CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)[str cStringUsingEncoding:[NSString defaultCStringEncoding]], strlen([str cStringUsingEncoding:[NSString defaultCStringEncoding]]), false);
        ExtAudioFileRef audiofile;
        ExtAudioFileOpenURL(audioFileURL, &audiofile);
        assert(audiofile);
        OSStatus err;
        UInt32 PropertySize;

        // the file's native data format
        AudioStreamBasicDescription fileFormat;
        PropertySize = sizeof(fileFormat);
        err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileDataFormat, &PropertySize, &fileFormat);

        // the underlying AudioFileID, used to query the container type
        AudioFileID aFile;
        PropertySize = sizeof(aFile);
        err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_AudioFile, &PropertySize, &aFile);
        AudioFileTypeID fileType;
        PropertySize = sizeof(fileType);
        err = AudioFileGetProperty(aFile, kAudioFilePropertyFileFormat, &PropertySize, &fileType);

        // client format to convert to: 32-bit signed integer, non-interleaved, 44.1 kHz stereo
        AudioStreamBasicDescription clientFormat;
        bzero(&clientFormat, sizeof(clientFormat));
        clientFormat.mChannelsPerFrame = 2;
        clientFormat.mBytesPerFrame = 4;
        clientFormat.mBytesPerPacket = clientFormat.mBytesPerFrame;
        clientFormat.mFramesPerPacket = 1;
        clientFormat.mBitsPerChannel = 32;
        clientFormat.mFormatID = kAudioFormatLinearPCM;
        clientFormat.mSampleRate = 44100.00;
        clientFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved; // or kLinearPCMFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved
        err = ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);

        // length of the file in frames
        SInt64 numFrames = 0;
        PropertySize = sizeof(numFrames);
        err = ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &PropertySize, &numFrames);
        NSNumber *pc = [NSNumber numberWithLongLong:numFrames];
        [[packetCount objectAtIndex:i] replaceObjectAtIndex:j withObject:pc];

        // create the buffers for reading in data (one buffer per channel, non-interleaved)
        bufferList = malloc(sizeof(AudioBufferList) + sizeof(AudioBuffer) * (clientFormat.mChannelsPerFrame - 1));
        bufferList->mNumberBuffers = clientFormat.mChannelsPerFrame;
        for (int ii = 0; ii < bufferList->mNumberBuffers; ++ii) {
            bufferList->mBuffers[ii].mDataByteSize = sizeof(SInt32) * numFrames;
            bufferList->mBuffers[ii].mNumberChannels = 1; // non-interleaved: one channel per buffer
            bufferList->mBuffers[ii].mData = malloc(bufferList->mBuffers[ii].mDataByteSize);
        }

        UInt32 rFrames = (UInt32)numFrames;
        err = ExtAudioFileRead(audiofile, &rFrames, bufferList);
        // note: this keeps only the second channel's buffer, and the length passed
        // here is in frames; the channel actually holds numFrames * sizeof(SInt32) bytes
        [linearr addObject:[NSData dataWithBytes:bufferList->mBuffers[1].mData length:numFrames]];
        err = ExtAudioFileDispose(audiofile);
    }
    [audioData addObject:linearr];
}
And this is how I play it:
UInt32 *buslist;
buslist = (UInt32 *)[[[audioData objectAtIndex:0] objectAtIndex:4] bytes];
In the render callback:
for (int i = 0; i < ioData->mNumberBuffers; i++) {
    AudioBuffer buffer = ioData->mBuffers[i];
    UInt32 *frameBuffer = buffer.mData;
    for (int j = 0; j < inNumberFrames; j++) {
        frameBuffer[j] = buslist[counter];
        if (counter >= 529200)
            counter = 0;
        else
            counter++;
    }
}
Now when I play the sound, the first part plays at double speed and the second part is only distortion.

I was having the exact same problem, I think: the incoming sound was at a lower sample rate, so the array I allocated wasn't big enough for the higher sample rate of the AUGraph, and playback was too fast and too short.
Make sure you allocate an array sized for the converted data, something like this:
sarray->frames = totalFramesInFile * graphSampleRate / fileAudioFormat.mSampleRate;
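To make that concrete, here is a minimal sketch under the same assumptions (graphSampleRate is the AUGraph's rate; audiofile and fileFormat are the variables from the loading code above):
// frames in the file, counted at the file's native sample rate
SInt64 fileFrames = 0;
UInt32 propSize = sizeof(fileFrames);
ExtAudioFileGetProperty(audiofile, kExtAudioFileProperty_FileLengthFrames, &propSize, &fileFrames);

// frames you'll actually receive once ExtAudioFile converts to the graph's rate;
// e.g. a 22050 Hz file pulled out at 44100 Hz yields twice as many frames
SInt64 convertedFrames = (SInt64)(fileFrames * graphSampleRate / fileFormat.mSampleRate);

// size each non-interleaved channel buffer from the converted count, not fileFrames
bufferList->mBuffers[ii].mDataByteSize = (UInt32)(convertedFrames * sizeof(SInt32));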

Related

Core Audio: how to generate a square wave with two channels (stereo)

It's the code from Learning Core Audio (http://www.amazon.com/Learning-Core-Audio-Hands-On-Programming/dp/0321636848).
Following what I understood from the book, generating stereo should mean setting asbd.mBitsPerChannel to 8 and asbd.mChannelsPerFrame to 2. But the audio I get has only one channel. I don't know what's wrong with the code; please help me. Thanks.
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

#define SAMPLE_RATE 44100
#define DURATION 5.0
#define FILENAME_FORMAT @"%0.3f-square.aif"

int main(int argc, const char * argv[])
{
    @autoreleasepool {
        if (argc < 2) {
            printf("Usage: CAToneFileGenerator n\n(where n is tone in Hz)");
            return -1;
        }
        double hz = atof(argv[1]);
        assert(hz > 0);
        NSLog(@"generating %f hz tone", hz);
        NSString *fileName = [NSString stringWithFormat:FILENAME_FORMAT, hz];
        NSString *filePath = [[[NSFileManager defaultManager] currentDirectoryPath] stringByAppendingPathComponent:fileName];
        NSLog(@"%@", filePath);
        NSURL *fileURL = [NSURL fileURLWithPath:filePath];

        AudioStreamBasicDescription asbd;
        memset(&asbd, 0, sizeof(asbd));
        asbd.mSampleRate = SAMPLE_RATE;
        asbd.mFormatID = kAudioFormatLinearPCM;
        asbd.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        asbd.mBitsPerChannel = 8;
        asbd.mChannelsPerFrame = 2;
        asbd.mFramesPerPacket = 1;
        asbd.mBytesPerFrame = 2;
        asbd.mBytesPerPacket = 2;

        AudioFileID audioFile;
        OSStatus audioErr = noErr;
        audioErr = AudioFileCreateWithURL((CFURLRef)fileURL, kAudioFileAIFFType, &asbd, kAudioFileFlags_EraseFile, &audioFile);
        assert(audioErr == noErr);

        long maxSampleCount = SAMPLE_RATE * DURATION;
        long sampleCount = 0;
        UInt32 bytesToWrite = 2;
        double wavelengthInSamples = SAMPLE_RATE / hz;
        while (sampleCount < maxSampleCount) {
            for (int i = 0; i < wavelengthInSamples; i++) {
                SInt16 sample;
                if (i < wavelengthInSamples / 2) {
                    sample = CFSwapInt16HostToBig(SHRT_MAX);
                } else {
                    sample = CFSwapInt16HostToBig(SHRT_MIN);
                }
                audioErr = AudioFileWriteBytes(audioFile, false, sampleCount * 2, &bytesToWrite, &sample);
                assert(audioErr == noErr);
                sampleCount++;
            }
        }
        audioErr = AudioFileClose(audioFile);
        assert(audioErr == noErr);
        NSLog(@"wrote %ld samples", sampleCount);
    }
    return 0;
}
Just changing the ASBD from the book code doesn't magically fix everything. You haven't accounted for how you're writing the samples to the file. Also, 8-bit is going to sound like ass.
Go back to mBitsPerChannel = 16, and then account for the fact you're writing two channels per frame, meaning that mBytesPerFrame and mBytesPerPacket will now be 4 (they were 2 in the book). Think about why this is.
Then you should just be able to add a second call to AudioFileWriteBytes() -- or do a loop where you count over mChannelsPerFrame -- right after the first one. But you'll have to account for the different offsets in the file, since you're writing 4 bytes each pass instead of 2. I think this is right:
audioErr = AudioFileWriteBytes(audioFile, false, sampleCount*4, &bytesToWrite, &sample); // left
audioErr = AudioFileWriteBytes(audioFile, false, (sampleCount*4)+2, &bytesToWrite, &sample); // right
You need to figure out some of this stuff on your own in order for it to sink in.
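For reference, a minimal sketch of those changes pulled together (my reading of this answer, not code from the book):
asbd.mBitsPerChannel = 16;   // back to 16-bit samples
asbd.mChannelsPerFrame = 2;  // stereo
asbd.mBytesPerFrame = 4;     // 2 channels x 2 bytes
asbd.mBytesPerPacket = 4;    // 1 frame per packet

// ... AudioFileCreateWithURL as before ...

UInt32 bytesToWrite = 2;     // each call still writes one 2-byte sample
while (sampleCount < maxSampleCount) {
    for (int i = 0; i < wavelengthInSamples; i++) {
        SInt16 sample = (i < wavelengthInSamples / 2) ? CFSwapInt16HostToBig(SHRT_MAX)
                                                      : CFSwapInt16HostToBig(SHRT_MIN);
        // a frame is now 4 bytes: left sample at sampleCount*4, right at sampleCount*4 + 2
        audioErr = AudioFileWriteBytes(audioFile, false, sampleCount * 4, &bytesToWrite, &sample);     // left
        audioErr = AudioFileWriteBytes(audioFile, false, sampleCount * 4 + 2, &bytesToWrite, &sample); // right
        sampleCount++;
    }
}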

Dividing a file into 10MB chunks

I have a script that divides a file into 10MB chunks. I hadn't had a problem with it until I tried it on a 6GB file: now I'm getting negative values for the ranges even though they are uint64_t. Any suggestions on where the error is?
NSData *conData = [NSURLConnection sendSynchronousRequest:fileSizeRequest returningResponse:&response error:&error];
if (conData)
{
    NSDictionary *headers = [response allHeaderFields];
    NSString *fileSizeString = [headers objectForKey:@"Content-Length"];
    uint64_t fileSize = strtoull([fileSizeString UTF8String], NULL, 0);
    self.size += fileSize;
    uint64_t amountOfRanges = fileSize / 10485760;
    for (int i = 0; i <= amountOfRanges; i++)
    {
        uint64_t rangeMin = 0;
        uint64_t rangeMax = 0;
        if (i != amountOfRanges)
        {
            rangeMin = i * 10485760;
            rangeMax = (i + 1) * 10485760 - 1;
        }
        else
        {
            if (i == 0)
            {
                rangeMin = 0;
                rangeMax = fileSize - 1;
            }
            else
            {
                rangeMin = i * 10485760;
                rangeMax = i * 10485760 - 1 + (fileSize - rangeMin);
            }
        }
    }
}
You have a problem with expressions such as this:
rangeMin = i * 10485760;
Note that i is an int and 10485760 is an int literal, so the resulting int expression can easily overflow. You should ideally make i a uint64_t and/or use unsigned long long literals, e.g.
rangeMin = i * 10485760ULL;
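Putting that together, a sketch of the loop with 64-bit arithmetic throughout (kChunkSize is just an illustrative name):
const uint64_t kChunkSize = 10485760ULL; // 10 MB
uint64_t amountOfRanges = fileSize / kChunkSize;
for (uint64_t i = 0; i <= amountOfRanges; i++)
{
    // i and kChunkSize are both 64-bit, so the multiplication can't overflow
    uint64_t rangeMin = i * kChunkSize;
    uint64_t rangeMax;
    if (i != amountOfRanges)
        rangeMax = rangeMin + kChunkSize - 1;
    else
        rangeMax = fileSize - 1; // last (possibly short) chunk
    // ... request bytes rangeMin through rangeMax ...
}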

Accelerate Framework FFT to create audio fingerprint in iOS

First of all, sorry for my bad English.
I started with Objective-C a couple of weeks ago. I'm working on a project that requires me to compare two audio signals recorded on two iOS devices. So far I've managed to record two .aif files, one from an iPhone 4S and one from an iPhone 4. I then tried to apply the algorithm from "A Highly Robust Audio Fingerprinting System" by Jaap Haitsma to get two fingerprints (binary bit patterns like 101011010) and compare them bit by bit. But so far the match rate I get is somewhere between 45% and 55%, which is pretty much the random probability of 0s and 1s. Can somebody give me any advice? Here's the code so far:
CalculateFingerprint *myCalculateFingerprint = [[CalculateFingerprint alloc] init];
SInt16 *inputBuffer;
path4 = [documentsDirectory stringByAppendingPathComponent:fileName4];

// calculate for file 4
fileURL = [NSURL fileURLWithPath:path4];
status = AudioFileOpenURL((__bridge CFURLRef)fileURL, kAudioFileReadPermission, kAudioFileAIFFType, &myAudioFile);
status = AudioFileGetPropertyInfo(myAudioFile,
                                  kAudioFilePropertyAudioDataPacketCount,
                                  &propertySizeDataPacketCount,
                                  &writabilityDataPacketCount);
status = AudioFileGetProperty(myAudioFile,
                              kAudioFilePropertyAudioDataPacketCount,
                              &propertySizeDataPacketCount,
                              &numberOfPackets);
status = AudioFileGetPropertyInfo(myAudioFile,
                                  kAudioFilePropertyMaximumPacketSize,
                                  &propertySizeMaxPacketSize,
                                  &writabilityMaxPacketSize);
status = AudioFileGetProperty(myAudioFile,
                              kAudioFilePropertyMaximumPacketSize,
                              &propertySizeMaxPacketSize,
                              &maxPacketSize);
inputBuffer = (SInt16 *)malloc(numberOfPackets * maxPacketSize);
currentPacket = 0;
status = AudioFileReadPackets(myAudioFile,
                              false,
                              &numberOfBytesRead,
                              NULL,
                              currentPacket,
                              &numberOfPackets,
                              inputBuffer);
[myCalculateFingerprint calculateFingerprint:inputBuffer sampleCount:numberOfPackets index:indexFile];
status = AudioFileClose(myAudioFile);
Here's the calculation of fingerprint code:
-(void)calculateFingerprint:(SInt16 *)samples
                sampleCount:(int)sampleCount
                      index:(int)indexFile {
    // Divide the audio signal into 32 overlapping frames.
    // note: with stepFrames = sampleCount/62, the last frame's end (63*stepFrames)
    // can run past sampleCount, reading beyond the samples buffer
    frames myFrames[32];
    int stepFrames = sampleCount / 62;
    int number = 0;
    int index;
    for (int i = 0; i < 32; ++i) {
        index = 0;
        myFrames[i].start = number;
        myFrames[i].end = number + (32 * stepFrames);
        myFrames[i].dataFrames = (SInt16 *)malloc((myFrames[i].end - number + 1) * sizeof(SInt16));
        for (int j = number; j <= myFrames[i].end; ++j) {
            myFrames[i].dataFrames[index] = samples[j];
            ++index;
        }
        number = number + stepFrames;
    }

    // Calculate the FFT for each of the audio signal frames.
    CalculateFFT *myCalculateFFT = [[CalculateFFT alloc] init];
    theFFT myFFTData[32];
    for (int i = 0; i < 32; ++i) {
        myFFTData[i].FFTdata = [myCalculateFFT calculateFFTForData:myFrames[i].dataFrames];
    }

    // Each index represents a frequency band: index i is frequency i * 44100/1024.
    // We only need 33 bands from 300 Hz to 2000 Hz, so we take FFTdata indices 7 to 40.
    float energy[33][33];
    for (int i = 0; i < 33; ++i) {
        energy[0][i] = 0;
    }
    for (int i = 1; i < 33; ++i) {
        for (int j = 0; j < 33; ++j) {
            energy[i][j] = myFFTData[i - 1].FFTdata[j + 7]; // frame i-1 fills row i; row 0 stays zero
        }
    }

    // Next we derive the fingerprint bits from the band-energy differences.
    Float32 check = 0;
    int fingerPrint[32][32];
    NSMutableString *result = [[NSMutableString alloc] init];
    for (int i = 0; i < 32; ++i) {
        for (int j = 0; j < 32; ++j) {
            check = energy[i + 1][j] - energy[i + 1][j + 1] - energy[i][j] + energy[i][j + 1];
            fingerPrint[i][j] = (check > 0) ? 1 : 0;
            [result appendString:[NSString stringWithFormat:@"%d", fingerPrint[i][j]]];
        }
    }
    // note: result is assembled here but never stored or returned
}
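(For reference, the sign test in that last loop is my reading of the bit-derivation rule from the Haitsma paper: bit F(n,m) = 1 if E(n,m) - E(n,m+1) - (E(n-1,m) - E(n-1,m+1)) > 0, and 0 otherwise, where E(n,m) is the energy of frequency band m in frame n.)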
And finally the FFT calculation code:
-(void)FFTSetup {
    UInt32 maxFrames = 1024;
    originalReal = (float *)malloc(maxFrames * sizeof(float));
    originalRealTransfer = (float *)malloc(maxFrames * sizeof(float));
    obtainedReal = (float *)malloc(maxFrames * sizeof(float));
    freqArray = (Float32 *)malloc((maxFrames / 2) * sizeof(Float32));
    fftLog2n = log2f(maxFrames);
    fftN = 1 << fftLog2n;
    fftNOver2 = maxFrames / 2;
    fftBufferCapacity = maxFrames;
    fftIndex = 0;
    fftA.realp = (float *)malloc(fftNOver2 * sizeof(float));
    fftA.imagp = (float *)malloc(fftNOver2 * sizeof(float));
    fftSetup = vDSP_create_fftsetup(fftLog2n, FFT_RADIX2);
}

-(Float32 *)calculateFFTForData:(SInt16 *)sampleData
{
    // note: this reallocates every buffer (and the FFT setup) on each call
    // and never frees the previous ones, so it leaks
    [self FFTSetup];
    int stride = 1;
    for (int i = 0; i < fftN; ++i) {
        originalReal[i] = (float)sampleData[i];
    }
    UInt32 maxFrames = 1024;

    // Apply a Hann window to the data
    int windowSize = maxFrames;
    float *window = (float *)malloc(sizeof(float) * windowSize);
    memset(window, 0, sizeof(float) * windowSize);
    vDSP_hann_window(window, windowSize, vDSP_HANN_NORM);
    vDSP_vmul(originalReal, 1, window, 1, originalRealTransfer, 1, windowSize);

    // Pack the windowed signal into split-complex form and run the forward real FFT
    vDSP_ctoz((COMPLEX *)originalRealTransfer, 2, &fftA, 1, fftNOver2);
    vDSP_fft_zrip(fftSetup, &fftA, stride, fftLog2n, FFT_FORWARD);
    float scale = (float)1.0 / (2 * fftN);
    vDSP_vsmul(fftA.realp, 1, &scale, fftA.realp, 1, fftNOver2);
    vDSP_vsmul(fftA.imagp, 1, &scale, fftA.imagp, 1, fftNOver2);
    vDSP_ztoc(&fftA, 1, (COMPLEX *)obtainedReal, 2, fftNOver2);

    // power per bin
    int index = 0;
    NSMutableString *testResult = [[NSMutableString alloc] init];
    for (int i = 0; i < fftN; i = i + 2) {
        freqArray[index] = (obtainedReal[i] * obtainedReal[i]) + (obtainedReal[i + 1] * obtainedReal[i + 1]);
        [testResult appendString:[NSString stringWithFormat:@"%f ", freqArray[index]]];
        ++index;
    }
    return freqArray;
}

Objective-C RC4 Decryption

I am new to Objective-C, but am an experienced developer (C#), and I can't figure this out:
I have a string which is RC4-encrypted, and I need to decrypt it using Objective-C on the iPad (iOS 5.0). I have looked all over the net for a working example, but have had no luck finding one that works end to end. Not only does the code below not return the decrypted string correctly, it returns something different every time it executes, which makes me think a pointer is being released someplace.
Note: I do not know if it matters, but the string was encrypted using http://archive.plugins.jquery.com/project/RC4 and then stored as text in a SQLite database, which I am now accessing from Objective-C. (I know, the architecture sounds messy, but I can't change that at this point.)
The code I am using is taken from RC4 encryption - CommonCrypto (Objective-C) vs PHP:
+ (NSString *)decryptData:(NSData *)dataToDecrypt
{
    const void *vplainText;
    size_t plainTextBufferSize;
    plainTextBufferSize = [dataToDecrypt length];
    vplainText = [dataToDecrypt bytes];

    CCCryptorStatus ccStatus;
    uint8_t *bufferPtr = NULL;
    size_t bufferPtrSize = 0;
    size_t movedBytes = 0;
    bufferPtrSize = (plainTextBufferSize + kCCBlockSize3DES) & ~(kCCBlockSize3DES - 1);
    bufferPtr = malloc(bufferPtrSize * sizeof(uint8_t));
    memset((void *)bufferPtr, 0x0, bufferPtrSize);

    NSString *key = @"theKeyIUsedtoEncryptInTheFirstPlace";
    const void *vkey = (const void *)[key UTF8String];
    size_t keyLength = [[key dataUsingEncoding:NSUTF8StringEncoding] length];

    ccStatus = CCCrypt(kCCDecrypt,
                       kCCAlgorithmRC4,
                       0,
                       vkey,
                       kCCKeySizeDES,
                       nil,
                       vplainText,
                       plainTextBufferSize,
                       (void *)bufferPtr,
                       bufferPtrSize,
                       &movedBytes);
    if (ccStatus == kCCSuccess) NSLog(@"SUCCESS");
    /*else*/ if (ccStatus == kCCParamError) return @"PARAM ERROR";
    else if (ccStatus == kCCBufferTooSmall) return @"BUFFER TOO SMALL";
    else if (ccStatus == kCCMemoryFailure) return @"MEMORY FAILURE";
    else if (ccStatus == kCCAlignmentError) return @"ALIGNMENT";
    else if (ccStatus == kCCDecodeError) return @"DECODE ERROR";
    else if (ccStatus == kCCUnimplemented) return @"UNIMPLEMENTED";

    NSString *result = [[NSString alloc] initWithData:[NSData dataWithBytes:(const void *)bufferPtr length:(NSUInteger)movedBytes] encoding:NSASCIIStringEncoding];
    NSLog(@"%@", result);
    return result;
}
Use this function for both encryption and decryption: just run the encoded string through it again with the same key to decode it.
-(NSString *)rc4Key:(NSString *)key str:(NSString *)str
{
    int j = 0;
    unichar res[str.length];
    const unichar *buffer = res;
    unsigned char s[256];

    // key-scheduling algorithm (KSA)
    for (int i = 0; i < 256; i++)
    {
        s[i] = i;
    }
    for (int i = 0; i < 256; i++)
    {
        j = (j + s[i] + [key characterAtIndex:(i % key.length)]) % 256;
        unsigned char tmp = s[i]; s[i] = s[j]; s[j] = tmp; // swap s[i] and s[j]
    }

    // pseudo-random generation algorithm (PRGA)
    int i = j = 0;
    for (int y = 0; y < str.length; y++)
    {
        i = (i + 1) % 256;
        j = (j + s[i]) % 256;
        unsigned char tmp = s[i]; s[i] = s[j]; s[j] = tmp; // swap s[i] and s[j]
        unsigned char f = [str characterAtIndex:y] ^ s[(s[i] + s[j]) % 256];
        res[y] = f;
    }
    return [NSString stringWithCharacters:buffer length:str.length];
}
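A quick round-trip check (illustrative only; myCipher stands in for whatever object implements this method, and the input is assumed to be plain ASCII):
NSString *encoded = [myCipher rc4Key:@"secret" str:@"hello world"];
NSString *decoded = [myCipher rc4Key:@"secret" str:encoded];
// decoded should come back as @"hello world"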
I see a couple of references to DES in your code (kCCKeySizeDES, kCCBlockSize3DES). That doesn't seem right -- at a minimum, kCCKeySizeDES should probably be replaced with keyLength.
If that doesn't solve it, I'd look next at possible text encoding issues. The data in SQLite might be UTF8-encoded binary data, in which case you'll probably have to "decode" it by converting from UTF8 to ISO8859-1.
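Concretely, that first fix would make the call look like this (my sketch, not verified against the asker's data):
ccStatus = CCCrypt(kCCDecrypt,
                   kCCAlgorithmRC4,
                   0,            // no options: RC4 is a stream cipher, no padding or IV
                   vkey,
                   keyLength,    // the actual key length, instead of kCCKeySizeDES
                   NULL,         // no IV
                   vplainText,
                   plainTextBufferSize,
                   (void *)bufferPtr,
                   bufferPtrSize,
                   &movedBytes);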
RC4 implementation translated from .net:
+(NSString *)RC4:(NSString *)data key:(NSString *)key
{
    id x;
    int y = 0;
    int i = 0;
    int j = 0;
    NSMutableArray *box = [[NSMutableArray alloc] initWithCapacity:256];
    NSString *result = @"";
    for (i = 0; i < 256; i++) {
        [box addObject:[NSNumber numberWithInt:i]];
    }
    for (i = 0; i < 256; i++) {
        j = ((int)[key characterAtIndex:(i % key.length)] + [[box objectAtIndex:i] intValue] + j) % 256;
        x = [box objectAtIndex:i];
        [box setObject:[box objectAtIndex:j] atIndexedSubscript:i];
        [box setObject:x atIndexedSubscript:j];
    }
    for (i = 0; i < data.length; i++) {
        y = i % 256;
        j = ([[box objectAtIndex:y] intValue] + j) % 256;
        x = [box objectAtIndex:y];
        [box setObject:[box objectAtIndex:j] atIndexedSubscript:y];
        [box setObject:x atIndexedSubscript:j];
        NSString *c = [NSString stringWithFormat:@"%c", ([data characterAtIndex:i] ^ (char)[[box objectAtIndex:([[box objectAtIndex:y] intValue] + [[box objectAtIndex:j] intValue]) % 256] intValue])];
        result = [result stringByAppendingString:c];
    }
    return result;
}

ExtAudioFileWrite puzzler (kExtAudioFileError_InvalidOperationOrder error)

The following code skips the first second of audio in a PCM .caf file and removes the last 5 seconds, writing to a temp file (which will be 6 seconds shorter than the input). Every pass through the loop, ExtAudioFileWrite produces a kExtAudioFileError_InvalidOperationOrder error. What am I doing wrong?
NSString *destURLString = [self.track.location absoluteString];
destURLString = [destURLString substringToIndex:([destURLString length] - 4)]; // remove .caf
destURLString = [NSString stringWithFormat:@"%@TMP.caf", destURLString];       // add TMP.caf
NSURL *destinationURL = [NSURL URLWithString:destURLString];

ExtAudioFileRef inputFile = NULL;
ExtAudioFileRef outputFile = NULL;

AudioStreamBasicDescription destFormat;
destFormat.mFormatID = kAudioFormatLinearPCM;
destFormat.mFormatFlags = kAudioFormatFlagsCanonical;
destFormat.mSampleRate = 22000;
destFormat.mFormatFlags = 0;
destFormat.mBytesPerPacket = 2;
destFormat.mFramesPerPacket = 1;
destFormat.mBytesPerFrame = 2;
destFormat.mChannelsPerFrame = 1;
destFormat.mBitsPerChannel = 16;
destFormat.mReserved = 0;

ExtAudioFileCreateWithURL((CFURLRef)destinationURL, kAudioFileCAFType, &destFormat, NULL, kAudioFileFlags_EraseFile, &outputFile);
OSStatus fileStatus = ExtAudioFileOpenURL((CFURLRef)track.location, &inputFile);
//AudioFileID fileID;
//OSStatus fileStatus = AudioFileOpenURL((CFURLRef)track.location, kAudioFileReadPermission, 0, &fileID);
//ExtAudioFileWrapAudioFileID(fileID, true, &inputFile);
OSStatus fileStatus2 = ExtAudioFileOpenURL((CFURLRef)destinationURL, &outputFile);
//NSLog(@"open status: %i", fileStatus2);

// find out how many frames long this file is
SInt64 length = 0;
UInt32 dataSize2 = (UInt32)sizeof(length);
OSStatus propStatus2 = ExtAudioFileGetProperty(inputFile, kExtAudioFileProperty_FileLengthFrames, &dataSize2, &length);

AudioStreamBasicDescription clientFormat;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mSampleRate = 22000;
clientFormat.mFormatFlags = kAudioFormatFlagsCanonical;
clientFormat.mBitsPerChannel = 16;
clientFormat.mChannelsPerFrame = 1;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerPacket = 2;
clientFormat.mBytesPerFrame = 2;
clientFormat.mReserved = 0;
UInt32 size = sizeof(clientFormat);

// set the intermediate format to canonical on both files for conversion
OSStatus setpropstatus = ExtAudioFileSetProperty(inputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
OSStatus setpropstatusout = ExtAudioFileSetProperty(outputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
//UInt32 size = sizeof(destFormat);
//OSStatus setpropstatus = ExtAudioFileSetProperty(inputFile, kAudioFilePropertyDataFormat, size, &destFormat);
//NSLog(@"set prop status in %i", setpropstatus);
//NSLog(@"set prop status out %i", setpropstatusout);

OSStatus seekStatus = ExtAudioFileSeek(inputFile, (SInt64)22000); // skip one second of audio
NSLog(@"seekstatus %i", seekStatus);
SInt64 newLength = length - (5 * 22000); // shorten by 5 seconds' worth of frames
NSLog(@"length: %lld frames", length);

UInt8 *buffer = malloc(65536); // 64K
UInt32 totalFramecount = 0;
while (true) {
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mData = buffer;        // pointer to buffer of audio data
    bufferList.mBuffers[0].mDataByteSize = 65536; // number of bytes in the buffer
    UInt32 frameCount = 65536 / 2;                // 2 bytes per frame

    // Read a chunk of input
    OSStatus status = ExtAudioFileRead(inputFile, &frameCount, &bufferList);
    totalFramecount += frameCount;
    NSLog(@"read status %i", status);
    //NSLog(@"loaded %f KB of data in %i frames", frameCount * 2 / 1024.0, frameCount);
    NSLog(@"loaded %i frames and stopping at %lld", totalFramecount, newLength);
    if (!frameCount || totalFramecount >= newLength) {
        // termination condition
        break;
    }
    OSStatus writeStatus = ExtAudioFileWrite(outputFile, frameCount, &bufferList);
    NSLog(@"ws: %i", writeStatus);
}
free(buffer);
ExtAudioFileDispose(inputFile);
ExtAudioFileDispose(outputFile);
Turns out ExtAudioFileCreateWithURL returns the file already open, so the subsequent ExtAudioFileOpenURL on the destination was not needed, even though it returns successfully. I removed that call and everything works correctly.
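In code, the fix is just dropping that second open and writing through the handle ExtAudioFileCreateWithURL returns (a condensed sketch of the corrected setup, reusing the variables above):
ExtAudioFileRef outputFile = NULL;
// ExtAudioFileCreateWithURL both creates AND opens the file, so outputFile is ready to use
ExtAudioFileCreateWithURL((CFURLRef)destinationURL, kAudioFileCAFType, &destFormat, NULL, kAudioFileFlags_EraseFile, &outputFile);

// no ExtAudioFileOpenURL(destinationURL, &outputFile) here; that extra open is
// the call removed above, which was leaving ExtAudioFileWrite failing with
// kExtAudioFileError_InvalidOperationOrder

ExtAudioFileSetProperty(outputFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
// ... seek/read/write loop unchanged, then:
ExtAudioFileDispose(outputFile);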