Error in AudioFileWritePackets - objective-c

I'm just trying to capture the mic and save the data to a WAV file. Here is the code:
AudioFileID FileID = remoteIOplayer->mixAudioFile;
if (inBusNumber == 16) {
    AudioUnitRender(audioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, ioData);
    OSStatus result = AudioFileWritePackets(FileID, false, (inNumberFrames * 4), NULL, mixpacketNum, &inNumberFrames, ioData->mBuffers[0].mData);
    if (result != noErr) {
        NSLog(@"Error Writing");
    }
    // advance the starting-packet counter after each write
    mixpacketNum += inNumberFrames;
}
and I created the file with the following format, which is the same one used to capture the mic:
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
The error is OSStatus -38, fnOpnErr (file not open).

I'm guessing that your call to open the audio file either failed or wasn't done before rendering started. What does your call to AudioFileCreateWithURL look like?
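For reference, here is a minimal sketch of the kind of setup I'd expect, assuming the mono 16-bit format above (the /tmp path is a placeholder, and remoteIOplayer->mixAudioFile is the ivar from the question). If AudioFileCreateWithURL returns anything other than noErr, every later AudioFileWritePackets call will fail with -38:
// Create the file (and check the status!) before the render callback ever fires.
AudioStreamBasicDescription audioFormat = {0};
audioFormat.mSampleRate       = 44100.0;
audioFormat.mFormatID         = kAudioFormatLinearPCM;
audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket  = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel   = 16;
audioFormat.mBytesPerPacket   = 2;
audioFormat.mBytesPerFrame    = 2;
CFURLRef url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault,
                                             CFSTR("/tmp/mix.wav"), // placeholder path
                                             kCFURLPOSIXPathStyle, false);
OSStatus status = AudioFileCreateWithURL(url, kAudioFileWAVEType, &audioFormat,
                                         kAudioFileFlags_EraseFile,
                                         &remoteIOplayer->mixAudioFile);
CFRelease(url);
if (status != noErr) {
    NSLog(@"AudioFileCreateWithURL failed: %d", (int)status);
}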

Related

How to to create a 1D Texture in Vulkan

I have a problem creating a 1D texture and I don't know how to solve it.
The texture data is a float pData[256][4].
The code is like this:
VkDeviceSize imageSize = 256 * 4 * 4;
uint32_t texMemSize = ftsize; // unused here; ftsize is defined elsewhere in the original code
VkBuffer stagingBuffer;
VkDeviceMemory stagingMemory;
VkBufferCreateInfo bufferCreateInfo = {};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.size = imageSize;
bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryAllocateInfo memAllocInfo2 = { };
memAllocInfo2.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
VkMemoryRequirements memReqs2 = {};
vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs2);
memAllocInfo2.allocationSize = memReqs2.size;
memAllocInfo2.memoryTypeIndex = findMemoryType(memReqs2.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if (vkAllocateMemory(device, &memAllocInfo2, nullptr, &stagingMemory) != VK_SUCCESS) {
throw std::runtime_error("failed to allocate memory 2!");
}
vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0);
void* data;
vkMapMemory(device, stagingMemory, 0, imageSize, 0, &data);
memcpy(data, pData, static_cast<size_t>(imageSize));
vkUnmapMemory(device, stagingMemory);
delete pData;
//
VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.imageType = VK_IMAGE_TYPE_1D;
imageInfo.extent.width = static_cast<uint32_t>(256);
imageInfo.extent.depth = 1;
imageInfo.extent.height = 1;
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.format = VK_FORMAT_R32G32B32A32_UINT;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.flags = 0; // Optional
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
if (vkCreateImage(device, &imageInfo, nullptr, &tftextureImage) != VK_SUCCESS) {
throw std::runtime_error("failed to create image!");
}
The code always gives me the runtime error "failed to create image". I tried changing some things like the format and the extent, but I can't solve it.
I believe the mistake is only in the imageInfo part, because the first part runs OK.
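One way to narrow this down (a diagnostic sketch, not a confirmed fix) is to ask the implementation up front whether it supports this exact format/type/tiling/usage combination; physicalDevice below is assumed to be the VkPhysicalDevice the logical device was created from:
// If this returns VK_ERROR_FORMAT_NOT_SUPPORTED, vkCreateImage was never
// going to succeed with these parameters on this implementation.
VkImageFormatProperties props;
VkResult res = vkGetPhysicalDeviceImageFormatProperties(
    physicalDevice,              // assumed: the VkPhysicalDevice behind `device`
    VK_FORMAT_R32G32B32A32_UINT, // note: the data is float, so _SFLOAT may be what was intended
    VK_IMAGE_TYPE_1D,
    VK_IMAGE_TILING_OPTIMAL,
    VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
    0,
    &props);
// On success, props.maxExtent tells you how large a 1D image may be here.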

encode .wav file using ffmpeg in objective c or c

I have to encode a .wav file and write it into the same file, or another file, using the
ffmpeg library. Here is my code for encoding:
-(void)audioencode:(const char *)fileName
{
AVFrame *frame;
AVPacket pkt;
int i, j, k, ret, got_output;
int buffer_size;
FILE *f;
uint16_t *samples;
const char *format_name = "wav";
const char *file_url = "/Users/xxxx/Downloads/simple-drum-beat.wav";
avcodec_register_all();
av_register_all();
AVOutputFormat *format = NULL;
for (AVOutputFormat *formatIter = av_oformat_next(NULL); formatIter != NULL; formatIter = av_oformat_next(formatIter))
{
int hasEncoder = NULL != avcodec_find_encoder(formatIter->audio_codec);
if (0 == strcmp(format_name, formatIter->name)) {
format = formatIter;
break;
}
}
AVCodec *codec = avcodec_find_encoder(format->audio_codec);
NSLog(@"tet test tststs");
AVCodecContext *c;
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
c->sample_fmt = AV_SAMPLE_FMT_S16;
if (!check_sample_fmt(codec, c->sample_fmt)) {
fprintf(stderr, "Encoder does not support sample format %s",
av_get_sample_fmt_name(c->sample_fmt));
exit(1);
}
c->bit_rate = 64000;//705600;
c->sample_rate = select_sample_rate(codec);
c->channel_layout = select_channel_layout(codec);
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
c->frame_size = av_get_audio_frame_duration(c, 16);
int bits_per_sample = av_get_bits_per_sample(c->codec_id);
int frameSize = av_get_audio_frame_duration(c,16);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(fileName, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", fileName);
exit(1);
}
/* frame containing input raw audio */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
frame->nb_samples = frameSize/*c->frame_size*/;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
buffer_size = av_samples_get_buffer_size(NULL, c->channels,frameSize /*c->frame_size*/,
c->sample_fmt, 0);
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "Could not setup audio frame\n");
exit(1);
}
float t, tincr;
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for(i=0;i<800;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
for (j = 0; j < frameSize/*c->frame_size*/; j++) {
samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j];
t += tincr;
}
/* encode the samples */
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
}
but after encoding, the file size is zero.
Please suggest what I'm doing wrong; any help will be appreciated. Thanks in advance.
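One observation about the snippet as posted (a hedged note, not a confirmed diagnosis): the output file is never closed and the encoder's delayed frames are never drained, so buffered data may never reach disk; note also that this writes raw codec output with no RIFF/WAV header, so even a non-empty file won't play as a .wav. With the same deprecated avcodec_encode_audio2 API the post uses, a minimal teardown would look roughly like this (reusing the c, f, pkt, ret, and got_output variables from above):
/* Drain frames the encoder is still holding by passing a NULL frame,
   then close the file and free everything. */
do {
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
    if (ret < 0) {
        fprintf(stderr, "Error flushing encoder\n");
        break;
    }
    if (got_output) {
        fwrite(pkt.data, 1, pkt.size, f);
        av_free_packet(&pkt);
    }
} while (got_output);
fclose(f);              /* without this, buffered writes may be lost */
av_freep(&samples);
av_frame_free(&frame);
avcodec_close(c);
av_free(c);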

How do I connect a Mixer with an AUGraph

I want to create an AUGraph that has the following AudioUnits:
1 * 440 Hz sine wave generator
1 * 880 Hz sine wave generator
1 * Mixer
1 * Output
If I connect my sine wave generators to my mixer, then the mixer to the output, I get no sound.
If I connect a sine wave generator directly to the output, I do get sound, probably because the output unit connects to the sine wave generator's callback.
Is there something I'm missing with how this should be connected?
Full code sample below: "Connect 1" and "Joins 1" are not working; comment them out and run "Connect 2" and "Joins 2" to hear the sine wave working.
//
// main.c
// TestAudioUnit
//
// Created by Chris Davis on 25/08/2013.
// Copyright (c) 2013 Chris Davis. All rights reserved.
//
#include <CoreFoundation/CoreFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
typedef struct MyAUGraphPlayer
{
AudioStreamBasicDescription streamFormat;
AUGraph graph;
AUNode output;
AUNode mixer;
AUNode sine;
AudioUnit audioUnits[3];
AudioBufferList *inputBuffer;
Float64 firstInputSampleTime;
Float64 firstOutputSampleTime;
Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;
OSStatus SineWaveRenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
// inRefCon is the context pointer we passed in earlier when setting the render callback
double currentPhase = *((double *)inRefCon);
// ioData is where we're supposed to put the audio samples we've created
Float32 * outputBuffer = (Float32 *)ioData->mBuffers[0].mData;
const double frequency = 880.0;
const double phaseStep = (frequency / 44100.) * (M_PI * 2.);
for(int i = 0; i < inNumberFrames; i++) {
outputBuffer[i] = sin(currentPhase);
currentPhase += phaseStep;
}
// If we were doing stereo (or more), this would copy our sine wave samples
// to all of the remaining channels
for(int i = 1; i < ioData->mNumberBuffers; i++) {
memcpy(ioData->mBuffers[i].mData, outputBuffer, ioData->mBuffers[i].mDataByteSize);
}
// writing the current phase back to inRefCon so we can use it on the next call
*((double *)inRefCon) = currentPhase;
return noErr;
}
int main(int argc, const char * argv[])
{
MyAUGraphPlayer p = {0};
MyAUGraphPlayer *player = &p;
NewAUGraph(&player->graph);
//Output
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Output,
.componentSubType = kAudioUnitSubType_DefaultOutput,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
AUGraphAddNode(player->graph, &description, &player->output);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
AudioComponentInstanceNew(comp, &player->audioUnits[0]);
AudioUnitInitialize(player->audioUnits[0]);
AudioStreamBasicDescription ASBD = {
.mSampleRate = 44100,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagsNativeFloatPacked,
.mChannelsPerFrame = 1,
.mFramesPerPacket = 1,
.mBitsPerChannel = sizeof(Float32) * 8,
.mBytesPerPacket = sizeof(Float32),
.mBytesPerFrame = sizeof(Float32)
};
AudioUnitSetProperty(player->audioUnits[0],
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Global,
0,
&ASBD,
sizeof(ASBD));
}
//Mixer
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Mixer,
.componentSubType = kAudioUnitSubType_StereoMixer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
AUGraphAddNode(player->graph, &description, &player->mixer);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
AudioComponentInstanceNew(comp, &player->audioUnits[1]);
AudioUnitInitialize(player->audioUnits[1]);
}
//Sine
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Generator,
.componentSubType = kAudioUnitSubType_ScheduledSoundPlayer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
AUGraphAddNode(player->graph, &description, &player->sine);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
AudioComponentInstanceNew(comp, &player->audioUnits[2]);
AudioUnitInitialize(player->audioUnits[2]);
}
//Connect 1
{
AURenderCallbackStruct callbackInfo = {
.inputProc = SineWaveRenderCallback,
.inputProcRefCon = player
};
AudioUnitSetProperty(player->audioUnits[1],
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&callbackInfo,
sizeof(callbackInfo));
}
//Joins 1 - sine to mixer to output
{
AUGraphConnectNodeInput(player->graph,
player->sine,
0,
player->mixer,
0);
AUGraphConnectNodeInput(player->graph,
player->mixer,
0,
player->output,
0);
}
//connect 2
/*{
AURenderCallbackStruct callbackInfo = {
.inputProc = SineWaveRenderCallback,
.inputProcRefCon = player
};
AudioUnitSetProperty(player->audioUnits[0],
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
0,
&callbackInfo,
sizeof(callbackInfo));
}
//Joins 2 - sine direct to output
{
AUGraphConnectNodeInput(player->graph,
player->sine,
0,
player->output,
0);
}*/
AUGraphInitialize(player->graph);
player->firstOutputSampleTime = -1;
AudioOutputUnitStart(player->audioUnits[0]);
AUGraphStart(player->graph);
printf("enter key to stop\n");
getchar();
return 0;
}
This code correctly plays the sine wave; however, I get lots of errors from CoreAudio during setup:
//
// main.c
// TestAudioUnit
//
// Created by Chris Davis on 25/08/2013.
// Copyright (c) 2013 Chris Davis. All rights reserved.
//
#include <CoreFoundation/CoreFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
typedef struct MyAUGraphPlayer
{
AudioStreamBasicDescription streamFormat;
AUGraph graph;
AUNode output;
AUNode mixer;
AUNode sine;
AudioUnit audioUnits[3];
AudioBufferList *inputBuffer;
Float64 firstInputSampleTime;
Float64 firstOutputSampleTime;
Float64 inToOutSampleTimeOffset;
} MyAUGraphPlayer;
OSStatus SineWaveRenderCallback(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
// inRefCon is the context pointer we passed in earlier when setting the render callback
double currentPhase = *((double *)inRefCon);
// ioData is where we're supposed to put the audio samples we've created
Float32 * outputBuffer = (Float32 *)ioData->mBuffers[0].mData;
const double frequency = 880.0;
const double phaseStep = (frequency / 44100.) * (M_PI * 2.);
for(int i = 0; i < inNumberFrames; i++) {
outputBuffer[i] = sin(currentPhase);
currentPhase += phaseStep;
}
// If we were doing stereo (or more), this would copy our sine wave samples
// to all of the remaining channels
for(int i = 1; i < ioData->mNumberBuffers; i++) {
memcpy(ioData->mBuffers[i].mData, outputBuffer, ioData->mBuffers[i].mDataByteSize);
}
// writing the current phase back to inRefCon so we can use it on the next call
*((double *)inRefCon) = currentPhase;
return noErr;
}
int main(int argc, const char * argv[])
{
MyAUGraphPlayer p = {0};
MyAUGraphPlayer *player = &p;
NewAUGraph(&player->graph);
OSStatus result = 0;
AudioStreamBasicDescription ASBD = {
.mSampleRate = 44100,
.mFormatID = kAudioFormatLinearPCM,
.mFormatFlags = kAudioFormatFlagsNativeFloatPacked,
.mChannelsPerFrame = 2,
.mFramesPerPacket = 1,
.mBitsPerChannel = sizeof(Float32) * 8,
.mBytesPerPacket = sizeof(Float32),
.mBytesPerFrame = sizeof(Float32)
};
//Output
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Output,
.componentSubType = kAudioUnitSubType_DefaultOutput,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
result = AUGraphAddNode(player->graph, &description, &player->output);
printf("err: %d\n", result);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
result = AudioComponentInstanceNew(comp, &player->audioUnits[0]);
printf("err: %d\n", result);
result = AudioUnitInitialize(player->audioUnits[0]);
printf("err: %d\n", result);
}
//Mixer
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Mixer,
.componentSubType = kAudioUnitSubType_StereoMixer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
result = AUGraphAddNode(player->graph, &description, &player->mixer);
printf("err: %d\n", result);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
result = AudioComponentInstanceNew(comp, &player->audioUnits[1]);
printf("err: %d\n", result);
}
//Sine
{
AudioComponentDescription description = {
.componentType = kAudioUnitType_Generator,
.componentSubType = kAudioUnitSubType_ScheduledSoundPlayer,
.componentManufacturer = kAudioUnitManufacturer_Apple
};
result = AUGraphAddNode(player->graph, &description, &player->sine);
printf("err: %d\n", result);
AudioComponent comp = AudioComponentFindNext(NULL, &description);
result = AudioComponentInstanceNew(comp, &player->audioUnits[2]);
printf("err: %d\n", result);
result = AudioUnitInitialize(player->audioUnits[2]);
printf("err: %d\n", result);
}
result = AUGraphConnectNodeInput(player->graph,
player->sine,
0,
player->mixer,
0);
printf("err: %d\n", result);
result = AUGraphConnectNodeInput(player->graph,
player->mixer,
0,
player->output,
0);
printf("err: %d\n", result);
result = AUGraphOpen(player->graph);
printf("err: %d\n", result);
UInt32 numbuses = 1;
result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
printf("err: %d\n", result);
for (UInt32 i = 0; i <= numbuses; ++i) {
// setup render callback struct
AURenderCallbackStruct rcbs;
rcbs.inputProc = &SineWaveRenderCallback;
rcbs.inputProcRefCon = player;
printf("set AUGraphSetNodeInputCallback\n");
// set a callback for the specified node's specified input
result = AUGraphSetNodeInputCallback(player->graph, player->mixer, i, &rcbs);
printf("AUGraphSetNodeInputCallback err: %d\n", result);
printf("set input bus %d, client kAudioUnitProperty_StreamFormat\n", (unsigned int)i);
// set the input stream format, this is the format of the audio for mixer input
result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, i, &ASBD, sizeof(ASBD));
printf("err: %d\n", result);
}
result = AudioUnitSetProperty(player->audioUnits[1], kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ASBD, sizeof(ASBD));
printf("err: %d\n", result);
OSStatus status = AUGraphInitialize(player->graph);
printf("err: %d\n", status);
player->firstOutputSampleTime = -1;
AudioOutputUnitStart(player->audioUnits[0]);
AUGraphStart(player->graph);
printf("enter key to stop\n");
getchar();
return 0;
}
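As a side note (not part of the answer above): when a graph connects but stays silent, CAShow from AudioToolbox prints the graph's nodes, connections, and stream formats to the console, which makes format mismatches between nodes easy to spot:
// After AUGraphInitialize, dump the whole graph; mismatched stream
// formats between connected nodes show up clearly in this output.
CAShow(player->graph);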

CFURL does not give full data when downloading

I am trying to use CFHTTP to write a small downloader. Unfortunately I cannot use NSURL, which would be very easy. I basically want an async way to download the data and store it in a file. I have not yet found how to do it asynchronously, but I have some code with a sync approach, which is not working either. The problem is that the downloader does not download the complete bytes; some data is missing, leaving the final file corrupt. Here is the code I have so far.
#include <stdio.h>
#include <string.h>
#include <CoreFoundation/CoreFoundation.h>
#include <CFNetwork/CFNetwork.h>
#include <CFNetwork/CFHTTPStream.h>
int main(int argc, const char * argv[])
{
// insert code here...
const char *data;
FILE *fp;
CFShow(CFSTR("Hello, World!\n"));
CFURLRef cfUrl = CFURLCreateWithString(kCFAllocatorDefault, CFSTR("http://the.earth.li/~sgtatham/putty/0.62/x86/putty.zip"), NULL);
CFHTTPMessageRef cfHttpReq = CFHTTPMessageCreateRequest(kCFAllocatorDefault, CFSTR("GET"), cfUrl, kCFHTTPVersion1_1);
CFReadStreamRef readStream = CFReadStreamCreateForHTTPRequest(kCFAllocatorDefault, cfHttpReq);
CFReadStreamOpen(readStream);
CFHTTPMessageRef cfHttpResp = CFHTTPMessageCreateEmpty(kCFAllocatorDefault, TRUE);
CFIndex numBytesRead;
do {
const int nBuffSize = 1024;
UInt8 buff[nBuffSize];
numBytesRead = CFReadStreamRead(readStream, buff, nBuffSize);
if( numBytesRead > 0 )
{
CFHTTPMessageAppendBytes(cfHttpResp, buff, numBytesRead);
}
else if( numBytesRead < 0 )
{
CFStreamError error = CFReadStreamGetError(readStream);
printf ("Error %d", error.error);
}
} while ( numBytesRead > 0 );
CFStringRef myStatusLine = CFHTTPMessageCopyResponseStatusLine(cfHttpReq);
CFReadStreamClose(readStream);
CFDataRef cfResp = CFHTTPMessageCopyBody(cfHttpResp);
CFIndex length = CFDataGetLength(cfResp);
printf ("%lu\n", length);
CFShow(myStatusLine);
data = (const char*)CFDataGetBytePtr(cfResp);
fp = fopen("/var/tmp/Update.zip", "w");
if (fp == NULL) {
printf("ACnnot be opened\n");
} else {
fwrite(data, length, 1, fp);
fclose(fp);
}
printf ("Download done\n");
CFRelease(cfUrl);
CFRelease(cfHttpReq);
CFRelease(readStream);
CFRelease(cfHttpResp);
CFRelease(cfResp);
return 0;
}
The length I get is less than the actual download size. I don't understand what is wrong with this code. Can someone please help me with this?
I still don't know what went wrong in this code, but I fixed my problem by replacing
//CFHTTPMessageRef cfHttpResp = CFHTTPMessageCreateEmpty(kCFAllocatorDefault, TRUE);
CFMutableDataRef cfResp = CFDataCreateMutable(kCFAllocatorDefault, 0);
I now use a CFMutableDataRef instead of a CFHTTPMessageRef. Here is the updated code:
int main(int argc, const char * argv[])
{
// insert code here...
const char *data;
FILE *fp;
CFShow(CFSTR("Hello, World!\n"));
CFURLRef cfUrl = CFURLCreateWithString(kCFAllocatorDefault, CFSTR("http://the.earth.li/~sgtatham/putty/0.62/x86/putty.zip"), NULL);
CFHTTPMessageRef cfHttpReq = CFHTTPMessageCreateRequest(kCFAllocatorDefault, CFSTR("GET"), cfUrl, kCFHTTPVersion1_1);
CFReadStreamRef readStream = CFReadStreamCreateForHTTPRequest(kCFAllocatorDefault, cfHttpReq);
CFReadStreamOpen(readStream);
//CFHTTPMessageRef cfHttpResp = CFHTTPMessageCreateEmpty(kCFAllocatorDefault, TRUE);
CFMutableDataRef cfResp = CFDataCreateMutable(kCFAllocatorDefault, 0);
CFIndex numBytesRead;
do {
const int nBuffSize = 1024;
UInt8 buff[nBuffSize];
numBytesRead = CFReadStreamRead(readStream, buff, nBuffSize);
if( numBytesRead > 0 )
{
CFDataAppendBytes(cfResp, buff, numBytesRead);
//CFHTTPMessageAppendBytes(cfHttpResp, buff, numBytesRead);
}
else if( numBytesRead < 0 )
{
CFStreamError error = CFReadStreamGetError(readStream);
printf ("Error %d", error.error);
}
} while ( numBytesRead > 0 );
CFReadStreamClose(readStream);
//CFDataRef cfResp = CFHTTPMessageCopyBody(cfHttpResp);
CFIndex length = CFDataGetLength(cfResp);
printf ("%lu\n", length);
data = (const char*)CFDataGetBytePtr(cfResp);
fp = fopen("/var/tmp/Update.zip", "w");
if (fp == NULL) {
printf("ACnnot be opened\n");
} else {
fwrite(data, length, 1, fp);
fclose(fp);
}
printf ("Download done\n");
CFRelease(cfUrl);
CFRelease(cfHttpReq);
CFRelease(readStream);
CFRelease(cfResp);
return 0;
}
Hope this helps. It would still be useful if someone could point out what went wrong in the original code.
I know this is late, but your code really helped me out (it is surprisingly hard to find straightforward, complete examples of getting the response body bytes using the CFNetwork API).
Anyway, I think the mistake was that the last parameter should be FALSE (response message), not TRUE (request message), in the CFHTTPMessageCreateEmpty line.
CFHTTPMessageRef cfHttpResp = CFHTTPMessageCreateEmpty(kCFAllocatorDefault, FALSE);
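Since the question also asked for an async approach, here is a rough sketch of how CFNetwork's event-driven API can be wired up (a sketch assuming a run loop is available; the StreamCallback name and the way the body buffer is passed through the context are made up for illustration):
// Hypothetical async variant: the stream calls us back on the run loop
// instead of us blocking in CFReadStreamRead.
static void StreamCallback(CFReadStreamRef stream, CFStreamEventType type, void *info) {
    CFMutableDataRef body = (CFMutableDataRef)info;
    if (type == kCFStreamEventHasBytesAvailable) {
        UInt8 buff[1024];
        CFIndex n = CFReadStreamRead(stream, buff, sizeof(buff));
        if (n > 0) CFDataAppendBytes(body, buff, n);
    } else if (type == kCFStreamEventEndEncountered || type == kCFStreamEventErrorOccurred) {
        CFReadStreamUnscheduleFromRunLoop(stream, CFRunLoopGetCurrent(), kCFRunLoopCommonModes);
        CFRunLoopStop(CFRunLoopGetCurrent()); // write `body` to disk here
    }
}
// ... after creating readStream as in the code above, set the client
// and schedule it on the run loop *before* opening the stream:
CFMutableDataRef body = CFDataCreateMutable(kCFAllocatorDefault, 0);
CFStreamClientContext ctx = {0, body, NULL, NULL, NULL};
CFReadStreamSetClient(readStream,
                      kCFStreamEventHasBytesAvailable | kCFStreamEventEndEncountered | kCFStreamEventErrorOccurred,
                      StreamCallback, &ctx);
CFReadStreamScheduleWithRunLoop(readStream, CFRunLoopGetCurrent(), kCFRunLoopCommonModes);
CFReadStreamOpen(readStream);
CFRunLoopRun(); // returns once CFRunLoopStop is called in the callback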

Objective-C: Generating (empty) WAV file

I'm trying to save some audio data to a WAV file -- I have audio data that normally I've been using in RemoteIO but I'm now trying to implement a function to save the data. I know the audio data is valid, so that's not a concern -- if I can just get an empty WAV file set up of the correct length, I can fill it with data later.
Right now, the code creates the file and it looks to be the right length in bytes, but apparently it's not formatted correctly, because OS X, QuickTime, iTunes, etc. can't recognize it (they see the file, but can't determine a length or play it):
NSURL * tvarFilename = [savePanel URL];
NSLog(@"doSaveAs filename = %@", tvarFilename);
//try to create an audio file there
AudioFileID mRecordFile;
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 2;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 4;
audioFormat.mBytesPerFrame = 4;
OSStatus status = AudioFileCreateWithURL((CFURLRef)tvarFilename, kAudioFileWAVEType, &audioFormat, kAudioFileFlags_EraseFile, &mRecordFile);
int beatsToRecord = 4; //temporary
int bpm = 120;
double intervalInSamples = (double) 60 / bpm;
intervalInSamples *= (double)44100;
int inNumberFrames = (intervalInSamples * beatsToRecord);
UInt32 frameBuffer[inNumberFrames];
int sampleTime = 0;
UInt32 thisSubBuffer[inNumberFrames];
for (int i = 0; i < inNumberFrames; i++) { frameBuffer[i] = 0; }
UInt32 bytesToWrite = inNumberFrames * sizeof(UInt32);
status = AudioFileWriteBytes(mRecordFile, false, 0, &bytesToWrite, &frameBuffer);
A WAV file is really simple: it consists of just a (usually 44-byte) header section, then the raw PCM data. I've written a library which needs to record WAV files, and I'm pretty sure you'll understand how I accomplish it. To clarify:
/**
* CD quality: 44100 Hz sample rate, 16 bit per sample, 2 channels (stereo):
**/
struct sprec_wav_header *hdr = sprec_wav_header_from_params(44100, 16, 2);
int filesize = /* obtain the file size somehow here */;
/**
* -8 bytes for the first part of the header, see the WAV specification
**/
hdr->filesize = filesize - 8;
int filedesc = open("/tmp/dummy.wav", O_WRONLY | O_CREAT, 0644);
if (sprec_wav_header_write(filedesc, hdr))
{
printf("Error writing WAV header!\n");
}
close(filedesc);
free(hdr);
And the library I've written: https://github.com/H2CO3/libsprec/
Hope this helps.
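For reference, the 44-byte header mentioned above boils down to the following fixed layout (a sketch of the canonical PCM WAV header, not libsprec's actual struct; all multi-byte fields are little-endian):
#include <stdint.h>
/* Canonical 44-byte PCM WAV header. */
#pragma pack(push, 1)
struct wav_header {
    char     riff[4];         /* "RIFF" */
    uint32_t filesize;        /* total file size - 8 */
    char     wave[4];         /* "WAVE" */
    char     fmt[4];          /* "fmt " (note the trailing space) */
    uint32_t fmt_size;        /* 16 for PCM */
    uint16_t audio_format;    /* 1 = linear PCM */
    uint16_t num_channels;    /* e.g. 2 */
    uint32_t sample_rate;     /* e.g. 44100 */
    uint32_t byte_rate;       /* sample_rate * num_channels * bits/8 */
    uint16_t block_align;     /* num_channels * bits/8 */
    uint16_t bits_per_sample; /* e.g. 16 */
    char     data[4];         /* "data" */
    uint32_t data_size;       /* number of PCM bytes that follow */
};
#pragma pack(pop)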
The problem in the code in the initial question was the missing AudioFileClose(mRecordFile); line at the very end.
For those who are still searching for a working sample that doesn't use 3rd-party libraries, here is a slightly modified code snippet:
- (void)createSilentWAVFileAtURL:(NSURL *)fileURL {
AudioFileID mRecordFile;
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 2;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 4;
audioFormat.mBytesPerFrame = 4;
OSStatus status = AudioFileCreateWithURL((__bridge CFURLRef)fileURL, kAudioFileWAVEType, &audioFormat, kAudioFileFlags_EraseFile, &mRecordFile);
double intervalInSamples = 0.5;
intervalInSamples *= audioFormat.mSampleRate * audioFormat.mChannelsPerFrame;
int beatsToRecord = 4; //seconds of silence
int inNumberFrames = (intervalInSamples * beatsToRecord);
UInt32 frameBuffer[inNumberFrames];
for (int i = 0; i < inNumberFrames; i++) { frameBuffer[i] = 0; }
UInt32 bytesToWrite = inNumberFrames * sizeof(uint32_t);
status = AudioFileWriteBytes(mRecordFile, false, 0, &bytesToWrite, &frameBuffer);
status = AudioFileClose(mRecordFile);
NSAssert(status == noErr, @"");
}
P.S.: To decrease the final file size, reduce mChannelsPerFrame from 2 to 1 and lower mSampleRate (e.g. to 11000).
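A quick usage example for the snippet above (the path is a placeholder):
NSURL *url = [NSURL fileURLWithPath:@"/tmp/silence.wav"]; // placeholder path
[self createSilentWAVFileAtURL:url];
// The file now contains a few seconds of stereo 16-bit silence at 44.1 kHz.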