Canon SDK internal error at EdsDownload - canon-sdk

I get an internal error at EdsDownload. I'm using an EOS 1100D without an SD card. The Canon sample code (CameraControl) can download images. SDK versions used: 2.13 and 2.12 (I have the same problem with both). Please help me.
Note: the "picture000x.jpg" file size is 0 after the run (the file is created by the EdsCreateFileStream call).
My code output:
Device name: Canon EOS 1100D.
GET PICTURE:
'Call' handleObjectEvent.
Event: kEdsStateEvent_JobStatusChanged,
Event code: 770
Internal error at EdsDownload.
stream != NULL
Download ended.
Press key to quit.
My code:
bool getPictureEnabled = false,
hasCamera = false;
EdsCameraRef camera;
EdsImageRef picture;
volatile EdsUInt32 bufferSize;
EdsError DownloadImage(EdsDirectoryItemRef directoryItem);
handleStateEvent:
EdsError EDSCALLBACK handleStateEvent(EdsStateEvent event, EdsUInt32 parameter, EdsVoid * context) {
switch(event) {
case kEdsStateEvent_CaptureError:
printf("Event: kEdsStateEvent_CaptureError,\n");
throw "Camera capture error.";
case kEdsStateEvent_JobStatusChanged:
printf("Event: kEdsStateEvent_JobStatusChanged,\n");
break;
default:
break;
}
printf("Event code: %d.\n", (int)event);
return EDS_ERR_OK;
}
handleObjectEvent:
EdsError EDSCALLBACK handleObjectEvent(EdsObjectEvent event, EdsBaseRef object, EdsVoid * context) {
if(event == kEdsObjectEvent_DirItemCreated || event == kEdsObjectEvent_DirItemRequestTransfer || event == kEdsObjectEvent_DirItemRequestTransferDT) {
if(!getPictureEnabled) {
printf("object event handler: FALSE alert\n");
if(object) EdsRelease(object);
return EDS_ERR_OK;
}
//EdsRetain(object);
DownloadImage(object);
}
if(object) EdsRelease(object);
return EDS_ERR_OK;
}
main function:
int main(int parC, char *pars[]) {
int processCounter = 0;
EdsCameraListRef eclr;
EdsError edsError = EdsInitializeSDK();
if(edsError) {
printf("I can't initialize the Eds sdk.\n");
goto stop;
}
processCounter = 1;
edsError = EdsGetCameraList(&eclr);
if(edsError) {
printf("I can't get the camera list.\n");
goto stop;
}
processCounter = 2;
EdsUInt32 camCount;
edsError = EdsGetChildCount(eclr, &camCount);
if(edsError) {
printf("I can't read the camera count.\n");
goto stop;
}
hasCamera = camCount > 0;
if(!hasCamera) {
printf("There is no camera in the list.\n");
goto stop;
}
edsError = EdsGetChildAtIndex(eclr, 0, &camera);
if(edsError) {
printf("I can't get the camera.\n");
goto stop;
}
edsError = EdsSetCameraStateEventHandler(camera, kEdsStateEvent_All, handleStateEvent, NULL);
if(edsError) printf("Error at EdsSetCameraStateEventHandler.\n");
edsError = EdsSetObjectEventHandler(camera, kEdsObjectEvent_All, handleObjectEvent, NULL);
if(edsError) {
printf("Error at EdsSetObjectEventHandler.\n");
goto stop;
}
edsError = EdsOpenSession(camera);
if(edsError) {
printf("I can't open the session.\n");
goto stop;
}
processCounter = 3;
EdsDeviceInfo outDeviceInfo;
edsError = EdsGetDeviceInfo(camera, &outDeviceInfo);
if(edsError) printf("I can't get device info.\n");
else printf("Device name: %s.\n", (char *) outDeviceInfo.szDeviceDescription);
EdsUInt32 saveTo = kEdsSaveTo_Host;
edsError = EdsSetPropertyData(camera, kEdsPropID_SaveTo, 0, sizeof(EdsUInt32), &saveTo);
if(edsError != EDS_ERR_OK) printf("Error at EdsSetPropertyData -> kEdsSaveTo.\n");
EdsCapacity capacity = {0x7FFFFFFF, 0x1000, 1};
edsError = EdsSetCapacity(camera, capacity);
if(edsError != EDS_ERR_OK) printf("Error at EdsSetPropertyData -> kEdsSaveTo.\n");
printf("GET PICTURE:\n");
getPictureEnabled = true;
edsError = EdsSendCommand(camera, kEdsCameraCommand_TakePicture , 0);
if(edsError) printf("ERROR: Take picture.\n");
else { //handleObjectEvent "call":
printf("'Call' handleObjectEvent.\n");
edsError = EdsCloseSession(camera);
if(edsError) printf("I can't close the session 1.\n");
edsError = EdsOpenSession(camera);
if(edsError) printf("I can't open the session 2.\n");
}
stop:
printf("Press key to quit...\n");
getchar();
switch (processCounter) {
case 3:
edsError = EdsCloseSession(camera);
if(edsError) printf("I can't close the session 2.\n");
case 2:
edsError = EdsRelease(eclr);
if(edsError) printf("I can't release eds cam. list.\n");
case 1:
EdsTerminateSDK();
default: break;
}
return 0;
}
DownloadImage function:
EdsError DownloadImage(EdsDirectoryItemRef directoryItem) {
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); // I tried without this; it didn't change anything
EdsError err;
EdsStreamRef stream = NULL;
EdsDirectoryItemInfo dirItemInfo;
err = EdsGetDirectoryItemInfo(directoryItem, &dirItemInfo);
if(err == EDS_ERR_OK) {
err = EdsCreateFileStream(dirItemInfo.szFileName, kEdsFileCreateDisposition_CreateAlways, kEdsAccess_ReadWrite, &stream);
if(err != EDS_ERR_OK) printf("ERROR: EdsCreateFileStream.\n");
bufferSize = dirItemInfo.size;
//EdsSetProgressCallback(stream, ProgressFunc, kEdsProgressOption_Periodically, NULL);
}
else printf("ERROR 1\n");
//err = EdsSendStatusCommand(camera, kEdsCameraStatusCommand_UILock, 0); // I tried with this; it didn't change anything
//err = EdsSendStatusCommand(camera, kEdsCameraStatusCommand_EnterDirectTransfer, 0); // I tried with this; it didn't change anything
if(err) {
printf("ERROR: transfer lock.\n");
}//*/
//if(err == EDS_ERR_OK)
//do
//{
err = EdsDownload(directoryItem, dirItemInfo.size, stream); // HERE IS THE PROBLEM: EDS_ERR_INTERNAL_ERROR! (after this line err == 2)
//}
//while(err != EDS_ERR_OK);//*/
if(err == EDS_ERR_INTERNAL_ERROR) printf("\n\t\tInternal error at EdsDownload.\n");
//if(err == EDS_ERR_OK)
{
err = EdsDownloadComplete(directoryItem);
}
// Release stream
if( stream != NULL) {
EdsCreateImageRef(stream, &picture);
EdsRelease(stream);
stream = NULL;
printf(" stream != NULL\n");
}
//err = EdsSendStatusCommand(camera, kEdsCameraStatusCommand_ExitDirectTransfer, 0); // I tried with this; it didn't change anything
//err = EdsSendStatusCommand(camera, kEdsCameraStatusCommand_UIUnLock, 0); // I tried with this; it didn't change anything
if(err) {
printf("ERROR: transfer lock.\n");
return err; // was "return NULL": the function returns EdsError, not a pointer
}//*/
printf("Download ended.\n");
CoUninitialize(); // I tried without this; it didn't change anything
return err;
}

Thanks to all! I have the solution. It is not in the SDK documentation: this problem exists if you are using the Windows operating system, because the EDSDK delivers its callbacks through the Windows message queue, so the application has to pump messages.
The concrete solution:
while(GetMessage(&msg, NULL, NULL, NULL))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
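A minimal sketch of how this pump can be tied to the transfer. Note that downloadDone is a hypothetical flag, not part of my original code: handleObjectEvent would set it once DownloadImage returns, so the loop exits when the picture has arrived.
volatile bool downloadDone = false; // hypothetical flag, set at the end of handleObjectEvent

EdsSendCommand(camera, kEdsCameraCommand_TakePicture, 0);

// Pump the Win32 message queue; the EDSDK dispatches its callbacks from here.
MSG msg;
while (!downloadDone && GetMessage(&msg, NULL, 0, 0) > 0)
{
    TranslateMessage(&msg);
    DispatchMessage(&msg);
}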
I will fix the code, erase the unnecessary comments, and then it will work for everyone! (Soon.)

Could you check the call to "EdsCreateFileStream", especially the first parameter? According to the docs, "It creates a new file on a host computer (or opens an existing file) and creates a file stream for access to the file." The first parameter should be a local, accessible file path (e.g. "C:\tmp.jpg" on Windows), and from a quick look it seems you may not be specifying this correctly. Could you print it?
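For example, a minimal sketch of prefixing the camera's file name with an explicit, writable host directory (the C:\tmp path is an assumption, not from the original post):
char path[MAX_PATH];
// Write into a known-writable host directory (assumed) instead of the current working directory.
sprintf(path, "C:\\tmp\\%s", dirItemInfo.szFileName);
err = EdsCreateFileStream(path, kEdsFileCreateDisposition_CreateAlways,
                          kEdsAccess_ReadWrite, &stream);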

Related

Decode .wav file and write it into another file using ffmpeg

How to decode a .wav file and write it into another file using ffmpeg?
I got decoded data with this piece of code:
-(void)audioDecode:(const char *)outfilename inFileName:(const char *)filename
{
const char* input_filename=filename;
//avcodec_register_all();
av_register_all();
avcodec_register_all();
// av_register_all();
//av_ini
// AVFormatContext* container=avformat_alloc_context();
// if(avformat_open_input(&container,filename,NULL,NULL)<0){
// NSLog(#"Could not open file");
// }
AVCodec *codec;
AVCodecContext *c= NULL;
int len;
FILE *f, *outfile;
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
AVFrame *decoded_frame = NULL;
av_init_packet(&avpkt);
printf("Decode audio file %s to %s\n", filename, outfilename);
avcodec_register_all();
AVFrame* frame = av_frame_alloc();
if (!frame)
{
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
AVFormatContext* formatContext = NULL;
/* Opening the file, and check if it has opened */
if (avformat_open_input(&formatContext, filename, NULL, NULL) != 0)
{
av_frame_free(&frame);
NSLog(#"Could not open file");
}
if (avformat_find_stream_info(formatContext, NULL) < 0)
{
av_frame_free(&frame);
avformat_close_input(&formatContext);
NSLog(#"Error finding the stream info");
}
outfile = fopen(outfilename, "wb");
if (!outfile) {
av_free(c);
exit(1);
}
/* Find the audio Stream, if no audio stream are found, clean and exit */
AVCodec* cdc = NULL;
int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &cdc, 0);
if (streamIndex < 0)
{
av_frame_free(&frame);
avformat_close_input(&formatContext);
NSLog(#"Could not find any audio stream in the file");
exit(1);
}
/* Open the audio stream to read data in audioStream */
AVStream* audioStream = formatContext->streams[streamIndex];
/* Initialize the codec context */
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = cdc;
/* Open the codec, and verify if it has opened */
if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
{
av_frame_free(&frame);
avformat_close_input(&formatContext);
NSLog(#"Couldn't open the context with the decoder");
exit(1);
}
/* Initialize buffer to store compressed packets */
AVPacket readingPacket;
av_init_packet(&readingPacket);
int length = 1;
long long int chunk = ((formatContext->bit_rate)*length/8);
int j=0;
int count = 0;
//audioChunk output;
while(av_read_frame(formatContext, &readingPacket)==0){
//if((count+readingPacket.size)>start){
if(readingPacket.stream_index == audioStream->index){
AVPacket decodingPacket = readingPacket;
// Audio packets can have multiple audio frames in a single packet
while (decodingPacket.size > 0){
// Try to decode the packet into a frame
// Some frames rely on multiple packets, so we have to make sure the frame is finished before
// we can use it
int gotFrame = 0;
int result = avcodec_decode_audio4(codecContext, frame, &gotFrame, &decodingPacket);
count += result;
if (result >= 0 && gotFrame)
{
decodingPacket.size -= result;
decodingPacket.data += result;
int a;
for(int i=0;i<result-1;i++){
fwrite(frame->data[0], 1, decodingPacket.size, outfile);
// *(output.data+j)=frame->data[0][i];
//
j++;
if(j>=chunk) break;
}
// We now have a fully decoded audio frame
}
else
{
decodingPacket.size = 0;
decodingPacket.data = NULL;
}
// if(j>=chunk) break;
}
}
// }else count+=readingPacket.size;
//
// // To prevent memory leak, must free packet.
// av_free_packet(&readingPacket);
// if(j>=chunk) break;
}
fclose(outfile);
But the file is created with zero bytes. I don't know what's wrong with this code. And I got one more error: "Format adp detected only with low score of 25, misdetection possible!"
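One thing that stands out: the fwrite above writes frame->data[0] with the remaining packet size instead of the decoded sample count. A hedged sketch of the usual write for the avcodec_decode_audio4() API (this assumes a packed sample format; for planar formats data[0] holds only the first channel):
if (result >= 0 && gotFrame)
{
    // Size of the decoded samples, derived from the frame itself.
    int data_size = av_samples_get_buffer_size(NULL, codecContext->channels,
                                               frame->nb_samples,
                                               codecContext->sample_fmt, 1);
    if (data_size > 0)
        fwrite(frame->data[0], 1, data_size, outfile);
    decodingPacket.size -= result;
    decodingPacket.data += result;
}
else
{
    decodingPacket.size = 0;
    decodingPacket.data = NULL;
}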

Record rtsp stream with ffmpeg in iOS

I've followed iFrameExtractor to successfully stream RTSP in my Swift project. This project also has a recording function: it basically uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an mp4 file.
When I use this project on my device, the RTSP streaming works fine, but the recording function always generates a blank mp4 file with no image and no sound.
Could anyone tell me which step I am missing?
I'm using an iPhone 5 with iOS 9.1 and Xcode 7.1.1.
The ffmpeg version is 2.8.3, compiled following the CompilationGuide – FFmpeg.
Following is the sample code in this project
The function that generate every frame:
-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;
while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// 20130525 albert.liao modified start
// Initialize a new format context for writing file
if(veVideoRecordState!=eH264RecIdle)
{
switch(veVideoRecordState)
{
case eH264RecInit:
{
if ( !pFormatCtx_Record )
{
int bFlag = 0;
//NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";
const char *file = [videoPath UTF8String];
pFormatCtx_Record = avformat_alloc_context();
bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );
if(bFlag==true)
{
veVideoRecordState = eH264RecActive;
fprintf(stderr, "h264_file_create success\n");
}
else
{
veVideoRecordState = eH264RecIdle;
fprintf(stderr, "h264_file_create error\n");
}
}
}
//break;
case eH264RecActive:
{
if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
{
bFirstIFrame=true;
vPTS = packet.pts ;
vDTS = packet.dts ;
#if 0
NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
[pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
[NSTimer scheduledTimerWithTimeInterval:5.0//2.0
target:self
selector:@selector(StopRecording:)
userInfo:nil
repeats:NO];
#endif
}
// Record audio when 1st i-Frame is obtained
if(bFirstIFrame==true)
{
if ( pFormatCtx_Record )
{
#if PTS_DTS_IS_CORRECT==1
packet.pts = packet.pts - vPTS;
packet.dts = packet.dts - vDTS;
#endif
h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
break;
case eH264RecClose:
{
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
#if 0
// 20130607 Test
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
{
ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
{
[library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
if (error) {
// TODO: error handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum error");
} else {
// TODO: success handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum success");
}
}];
}
[library release];
});
#endif
vPTS = 0;
vDTS = 0;
vAudioPTS = 0;
vAudioDTS = 0;
pFormatCtx_Record = NULL;
NSLog(#"h264_file_close() is finished");
}
else
{
NSLog(#"fc no exist");
}
bFirstIFrame = false;
veVideoRecordState = eH264RecIdle;
}
break;
default:
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
pFormatCtx_Record = NULL;
}
NSLog(#"[ERROR] unexpected veVideoRecordState!!");
veVideoRecordState = eH264RecIdle;
break;
}
}
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
else if(packet.stream_index==audioStream)
{
// 20131024 albert.liao modfied start
static int vPktCount=0;
BOOL bIsAACADTS = FALSE;
int ret = 0;
if(aPlayer.vAACType == eAAC_UNDEFINED)
{
tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
}
@synchronized(aPlayer)
{
if(aPlayer==nil)
{
aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
NSLog(#"aPlayer initAudio");
if(bIsAACADTS)
{
aPlayer.vAACType = eAAC_ADTS;
//NSLog(#"is ADTS AAC");
}
}
else
{
if(vPktCount<5) // The voice is listened once image is rendered
{
vPktCount++;
}
else
{
if([aPlayer getStatus]!=eAudioRunning)
{
dispatch_async(dispatch_get_main_queue(), ^(void) {
@synchronized(aPlayer)
{
NSLog(#"aPlayer start play");
[aPlayer Play];
}
});
}
}
}
};
@synchronized(aPlayer)
{
int ret = 0;
ret = [aPlayer putAVPacket:&packet];
if(ret <= 0)
NSLog(#"Put Audio Packet Error!!");
}
// 20131024 albert.liao modfied end
if(bFirstIFrame==true)
{
switch(veVideoRecordState)
{
case eH264RecActive:
{
if ( pFormatCtx_Record )
{
h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
}
}
else
{
//fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",\
packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
}
// 20130525 albert.liao modified end
}
return frameFinished!=0;
}
avformat_write_header:
int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx,AVCodecContext *pAudioCodecCtx, double fps, void *p, int len )
{
int vRet=0;
AVOutputFormat *of=NULL;
AVStream *pst=NULL;
AVCodecContext *pcc=NULL, *pAudioOutputCodecContext=NULL;
avcodec_register_all();
av_register_all();
av_log_set_level(AV_LOG_VERBOSE);
if(!pFilePath)
{
fprintf(stderr, "FilePath no exist");
return -1;
}
if(!fc)
{
fprintf(stderr, "AVFormatContext no exist");
return -1;
}
fprintf(stderr, "file=%s\n",pFilePath);
// Create container
of = av_guess_format( 0, pFilePath, 0 );
fc->oformat = of;
strcpy( fc->filename, pFilePath );
// Add video stream
pst = avformat_new_stream( fc, 0 );
vVideoStreamIdx = pst->index;
fprintf(stderr,"Video Stream:%d",vVideoStreamIdx);
pcc = pst->codec;
avcodec_get_context_defaults3( pcc, AVMEDIA_TYPE_VIDEO );
// Save the stream as origin setting without convert
pcc->codec_type = pCodecCtx->codec_type;
pcc->codec_id = pCodecCtx->codec_id;
pcc->bit_rate = pCodecCtx->bit_rate;
pcc->width = pCodecCtx->width;
pcc->height = pCodecCtx->height;
if(fps==0)
{
double fps=0.0;
AVRational pTimeBase;
pTimeBase.num = pCodecCtx->time_base.num;
pTimeBase.den = pCodecCtx->time_base.den;
fps = 1.0/ av_q2d(pCodecCtx->time_base)/ FFMAX(pCodecCtx->ticks_per_frame, 1);
fprintf(stderr,"fps_method(tbc): 1/av_q2d()=%g",fps);
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
else
{
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
// reference ffmpeg\libavformat\utils.c
// For SPS and PPS in avcC container
pcc->extradata = malloc(sizeof(uint8_t)*pCodecCtx->extradata_size);
memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
pcc->extradata_size = pCodecCtx->extradata_size;
// For Audio stream
if(pAudioCodecCtx)
{
AVCodec *pAudioCodec=NULL;
AVStream *pst2=NULL;
pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
// Add audio stream
pst2 = avformat_new_stream( fc, pAudioCodec );
vAudioStreamIdx = pst2->index;
pAudioOutputCodecContext = pst2->codec;
avcodec_get_context_defaults3( pAudioOutputCodecContext, pAudioCodec );
fprintf(stderr,"Audio Stream:%d",vAudioStreamIdx);
fprintf(stderr,"pAudioCodecCtx->bits_per_coded_sample=%d",pAudioCodecCtx->bits_per_coded_sample);
pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;
// Copy the codec attributes
pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
pAudioOutputCodecContext->bit_rate = 12000;//pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
//FF_PROFILE_AAC_LOW;
// pAudioCodecCtx->bit_rate;
// AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;//pAudioCodecCtx->sample_fmt;
pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;
pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;
pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
pAudioOutputCodecContext->frame_size = 1024;
fprintf(stderr,"profile:%d, sample_rate:%d, channles:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
fprintf(stderr, "\ncould not open codec\n");
}
av_dict_free(&opts);
#if 0
// For Audio, this part is no need
if(pAudioCodecCtx->extradata_size!=0)
{
NSLog(#"extradata_size !=0");
pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t)*pAudioCodecCtx->extradata_size);
memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
}
else
{
// For WMA test only
pAudioOutputCodecContext->extradata_size = 0;
NSLog(#"extradata_size ==0");
}
#endif
}
if(fc->oformat->flags & AVFMT_GLOBALHEADER)
{
pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if ( !( fc->oformat->flags & AVFMT_NOFILE ) )
{
vRet = avio_open( &fc->pb, fc->filename, AVIO_FLAG_WRITE );
if(vRet!=0)
{
fprintf(stderr,"avio_open(%s) error", fc->filename);
}
}
// dump format in console
av_dump_format(fc, 0, pFilePath, 1);
vRet = avformat_write_header( fc, NULL );
if(vRet==0)
return 1;
else
return 0;
}
av_interleaved_write_frame:
void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void* p, int len, int64_t dts, int64_t pts )
{
AVStream *pst = NULL;
AVPacket pkt;
if ( 0 > vVideoStreamIdx )
return;
// may be audio or video
pst = fc->streams[ vStreamIdx ];
// Init packet
av_init_packet( &pkt );
if(vStreamIdx ==vVideoStreamIdx)
{
pkt.flags |= ( 0 >= getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
//pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pst->index;
pkt.data = (uint8_t*)p;
pkt.size = len;
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
// TODO: mark or unmark the log
//fprintf(stderr, "dts=%lld, pts=%lld\n",dts,pts);
// av_write_frame( fc, &pkt );
}
av_interleaved_write_frame( fc, &pkt );
}
av_write_trailer:
void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
return;
av_write_trailer( fc );
if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
avio_close( fc->pb );
av_free( fc );
}
Thanks.
It looks like you're using the same AVFormatContext for both the input and output?
In the line
pst = fc->streams[ vStreamIdx ];
You're assigning the AVStream* from the AVFormatContext connected to your input (the RTSP stream). But then later on you're trying to write the packet back to the same context with av_interleaved_write_frame( fc, &pkt );. I kind of think of a context as a file, which has helped me navigate this type of thing better. I do something identical to what you're doing (not on iOS though), where I use a separate AVFormatContext for each of the input (RTSP stream) and the output (mp4 file). If I'm correct, I think all you need to do is initialize a separate output AVFormatContext and set it up properly.
The following code (without all the error checking) is what I do to take an AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context that I had associated with the RTSP stream, and write from one to the other. This is after I have fetched a packet, etc., which in your case it looks like you're already doing (I just take the packet from av_read_frame and re-package it).
This is code that could be in your write frame function (but it also does include the writing of the header).
AVFormatContext * output_format_context;
AVStream * in_stream_2;
AVStream * out_stream_2;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
output_format = output_format_context->oformat;
// Loop through all streams
for (i = 0; i < input_format_context->nb_streams; i++) {
// Create a pointer to the input stream that was allocated earlier in the code
AVStream *in_stream = input_format_context->streams[i];
// Create a pointer to a new stream that will be part of the output
AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
// Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
out_stream->time_base = in_stream->time_base;
// This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
// I don't remember what this is for :)
out_stream->codec->codec_tag = 0;
// This just sets a flag from the format context to the stream relating to the header
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check NOFILE flag and open the output file context (previously the output file was only associated with the format context; now it is actually opened).
if (!(output_format->flags & AVFMT_NOFILE))
avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context,NULL);
// Re-getting the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescaling pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts so this just protects that first one for my purposes. You probably don't need.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write header, close/cleanup... etc
// ....
This code is fairly bare bones and doesn't include the setup (which it sounds like you're doing correctly anyway). I would also imagine this code could be cleaned up and tweaked for your purposes, but this works for me to re-write the RTSP stream into a file (in my case many files but code not shown).
The code is C code, so you might need to do minor tweaks for making it Swift compatible (for some of the library function calls maybe). I think overall it should be compatible though.
Hopefully this helps point you in the right direction. This was cobbled together thanks to several sample code sources (I don't remember where), along with warning prompts from the libraries themselves.
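For the cleanup that the comments above elide, a short sketch of the usual teardown for the output context (assumed boilerplate, not taken from the answer):
// Finalize the file and release the output context.
av_write_trailer(output_format_context);
if (!(output_format_context->oformat->flags & AVFMT_NOFILE))
    avio_closep(&output_format_context->pb);
avformat_free_context(output_format_context);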

Resolving SRV records with iOS SDK

I want to resolve DNS SRV records using the iOS SDK.
I've already tried the high-level Bonjour APIs Apple is providing, but they're not what I need. Now I'm using DNS SD.
void *processQueryForSRVRecord(void *record) {
DNSServiceRef sdRef;
int context;
printf("Setting up query for record: %s\n", record);
DNSServiceQueryRecord(&sdRef, 0, 0, record, kDNSServiceType_SRV, kDNSServiceClass_IN, callback, &context);
printf("Processing query for record: %s\n", record);
DNSServiceProcessResult(sdRef);
printf("Deallocating query for record: %s\n", record);
DNSServiceRefDeallocate(sdRef);
return NULL;
}
This works as long as it gets only correct SRV records (for example: _xmpp-server._tcp.gmail.com), but when the record is typed wrong, DNSServiceProcessResult(sdRef) goes into an infinite loop.
Is there a way to stop DNSServiceProcessResult or must I cancel the thread calling it?
Use good old select(). This is what I have at the moment:
- (void)updateDnsRecords
{
if (self.dnsUpdatePending == YES)
{
return;
}
else
{
self.dnsUpdatePending = YES;
}
NSLog(#"DNS update");
DNSServiceRef sdRef;
DNSServiceErrorType err;
const char* host = [self.dnsHost UTF8String];
if (host != NULL)
{
NSTimeInterval remainingTime = self.dnsUpdateTimeout;
NSDate* startTime = [NSDate date];
err = DNSServiceQueryRecord(&sdRef, 0, 0,
host,
kDNSServiceType_SRV,
kDNSServiceClass_IN,
processDnsReply,
&remainingTime);
// This is necessary so we don't hang forever if there are no results
int dns_sd_fd = DNSServiceRefSockFD(sdRef);
int nfds = dns_sd_fd + 1;
fd_set readfds;
int result;
while (remainingTime > 0)
{
FD_ZERO(&readfds);
FD_SET(dns_sd_fd, &readfds);
struct timeval tv;
tv.tv_sec = (time_t)remainingTime;
tv.tv_usec = (remainingTime - tv.tv_sec) * 1000000;
result = select(nfds, &readfds, (fd_set*)NULL, (fd_set*)NULL, &tv);
if (result == 1)
{
if (FD_ISSET(dns_sd_fd, &readfds))
{
err = DNSServiceProcessResult(sdRef);
if (err != kDNSServiceErr_NoError)
{
NSLog(#"There was an error reading the DNS SRV records.");
break;
}
}
}
else if (result == 0)
{
NBLog(#"DNS SRV select() timed out");
break;
}
else
{
if (errno == EINTR)
{
NBLog(#"DNS SRV select() interrupted, retry.");
}
else
{
NBLog(#"DNS SRV select() returned %d errno %d %s.", result, errno, strerror(errno));
break;
}
}
NSTimeInterval elapsed = [[NSDate date] timeIntervalSinceDate:startTime];
remainingTime -= elapsed;
}
DNSServiceRefDeallocate(sdRef);
}
}
static void processDnsReply(DNSServiceRef sdRef,
DNSServiceFlags flags,
uint32_t interfaceIndex,
DNSServiceErrorType errorCode,
const char* fullname,
uint16_t rrtype,
uint16_t rrclass,
uint16_t rdlen,
const void* rdata,
uint32_t ttl,
void* context)
{
NSTimeInterval* remainingTime = (NSTimeInterval*)context;
// If a timeout occurs the value of the errorCode argument will be
// kDNSServiceErr_Timeout.
if (errorCode != kDNSServiceErr_NoError)
{
return;
}
// The flags argument will have the kDNSServiceFlagsAdd bit set if the
// callback is being invoked when a record is received in response to
// the query.
//
// If kDNSServiceFlagsAdd bit is clear then callback is being invoked
// because the record has expired, in which case the ttl argument will
// be 0.
if ((flags & kDNSServiceFlagsMoreComing) == 0)
{
*remainingTime = 0;
}
// Record parsing code below was copied from Apple SRVResolver sample.
NSMutableData * rrData = [NSMutableData data];
dns_resource_record_t * rr;
uint8_t u8;
uint16_t u16;
uint32_t u32;
u8 = 0;
[rrData appendBytes:&u8 length:sizeof(u8)];
u16 = htons(kDNSServiceType_SRV);
[rrData appendBytes:&u16 length:sizeof(u16)];
u16 = htons(kDNSServiceClass_IN);
[rrData appendBytes:&u16 length:sizeof(u16)];
u32 = htonl(666);
[rrData appendBytes:&u32 length:sizeof(u32)];
u16 = htons(rdlen);
[rrData appendBytes:&u16 length:sizeof(u16)];
[rrData appendBytes:rdata length:rdlen];
rr = dns_parse_resource_record([rrData bytes], (uint32_t) [rrData length]);
// If the parse is successful, add the results.
if (rr != NULL)
{
NSString *target;
target = [NSString stringWithCString:rr->data.SRV->target encoding:NSASCIIStringEncoding];
if (target != nil)
{
uint16_t priority = rr->data.SRV->priority;
uint16_t weight = rr->data.SRV->weight;
uint16_t port = rr->data.SRV->port;
[[FailoverWebInterface sharedInterface] addDnsServer:target priority:priority weight:weight port:port ttl:ttl]; // You'll have to do this in with your own method.
}
}
dns_free_resource_record(rr);
}
Here's the Apple SRVResolver sample from which I got the RR parsing.
This Apple sample mentions that it may block forever, but strangely enough suggests using NSTimer when you try to add a timeout yourself. I think using select() is a much better way.
I have one to-do: implement cache flushing with DNSServiceReconfirmRecord (see the sketch below). But I won't do that now.
Be aware, this code is working, but I'm still testing it.
You need to add libresolv.dylib to your Xcode project's 'Linked Frameworks and Libraries'.
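For that to-do, a hedged sketch of what the DNSServiceReconfirmRecord call could look like, using the fullname, rdlen and rdata arguments that processDnsReply already receives (untested; this just follows the signature in dns_sd.h):
// Ask mDNSResponder to re-verify (and, if stale, flush) a cached record.
DNSServiceErrorType reconfirmErr = DNSServiceReconfirmRecord(0, /* flags */
                                                             0, /* interfaceIndex: any */
                                                             fullname,
                                                             kDNSServiceType_SRV,
                                                             kDNSServiceClass_IN,
                                                             rdlen,
                                                             rdata);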

udp winsock programming

I'm a beginner in socket programming. I want to receive UDP packets continuously from a port. For that I created a socket and, with bind and recv calls, finished my program; I store the UDP packets in a buffer. How do I receive the data packet by packet? And how do I put a condition on a particular time interval? Thanks in advance.
static int recvData = 1;
sockID = socket(AF_INET, SOCK_DGRAM, 0);
if(sockID < 0)
{
printf("Socket creation error\n");
WSACleanup();
}
else
{
printf("Socket Created\n");
}
fepAddr.sin_family = AF_INET;
fepAddr.sin_port = htons(inputData.portNo);
fepAddr.sin_addr.s_addr = inet_addr(inputData.destIPAddr);
if (bind(sockID, (struct sockaddr *)&fepAddr, sizeof(fepAddr)) == SOCKET_ERROR)
{
printf("bind() failed: %ld.\n", WSAGetLastError());
closesocket(sockID);
return 0;
}
else
{
printf("bind() is OK!\n");
}
memset(udpBuf,sizeof(udpBuf),0);
while (recvData)
{
printf("receiving data\n");
recvResult = recvfrom( sockID, udpBuf, sizeof(udpBuf), 0,(struct sockaddr *)&fepAddr, &sock_len);
fprintf(udp, "%s", udpBuf);
//fwrite(udpBuf, sizeof(udpBuf), 1, udp);
recvData-- ;
}
exit:
if(udp)
{
fclose(udp);
udp = 0;
}
//shutdown socket
closesocket(sockID);
fclose(udp);
recvfrom() receives UDP data packet-by-packet. If a given packet is too large, recvfrom() will return an error. As for timing, you can use select() to know when the socket is readable.
Try something like this:
sockID = socket(AF_INET, SOCK_DGRAM, 0);
if (sockID == INVALID_SOCKET)
{
printf("Socket creation error\n");
goto exit;
}
printf("Socket Created\n");
memset(&fepAddr, 0, sizeof(fepAddr));
fepAddr.sin_family = AF_INET;
fepAddr.sin_port = htons(inputData.portNo);
fepAddr.sin_addr.s_addr = inet_addr(inputData.destIPAddr);
if (bind(sockID, (struct sockaddr *)&fepAddr, sizeof(fepAddr)) == SOCKET_ERROR)
{
printf("bind() failed: %ld.\n", WSAGetLastError());
goto exit;
}
printf("bind() is OK!\n");
memset(udpBuf, 0, sizeof(udpBuf));
printf("receiving data\n");
while (...)
{
printf(".");
recvResult = recvfrom(sockID, udpBuf, sizeof(udpBuf), 0, (struct sockaddr *)&fepAddr, &addr_len);
if (recvResult == SOCKET_ERROR)
{
if (WSAGetLastError() != WSAEWOULDBLOCK)
{
printf("\nrecvfrom() failed: %ld.\n", WSAGetLastError());
goto exit;
}
fd_set fd;
FD_ZERO(&fd);
FD_SET(sockID, &fd);
timeval t;
t.tv_sec = ...; // seconds
t.tv_usec = ...; // microseconds
selectResult = select(0, &fd, NULL, NULL, &t);
if (selectResult == SOCKET_ERROR)
{
printf("\nselect() failed: %ld.\n", WSAGetLastError());
goto exit;
}
if (selectResult == 0)
{
printf("\nsocket timed out.\n");
goto exit;
}
continue;
}
if (recvResult > 0)
fwrite(udpBuf, recvResult, 1, udp);
}
exit:
if (udp != 0)
{
fclose(udp);
udp = 0;
}
if (sockID != INVALID_SOCKET)
{
closesocket(sockID);
sockID = INVALID_SOCKET;
}
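Note that the WSAEWOULDBLOCK branch above only ever fires on a non-blocking socket; the answer doesn't show that switch, so here is a short sketch (an assumption about the intended setup) that puts the socket into non-blocking mode right after it is created:
u_long nonBlocking = 1;
// FIONBIO toggles non-blocking mode; without it, recvfrom() simply blocks
// and never returns WSAEWOULDBLOCK.
if (ioctlsocket(sockID, FIONBIO, &nonBlocking) == SOCKET_ERROR)
{
    printf("ioctlsocket(FIONBIO) failed: %ld.\n", WSAGetLastError());
    goto exit;
}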

objective c audio meter

Is it possible for Xcode to have an audio level indicator?
I want to do something like this:
if (audioLevel == 100) {
}
or something similar...
Any ideas? Example code, please?
I'm VERY new to Objective-C, so the more explaining the better! :D
Unfortunately, there isn't a very straightforward API to do this. You need to use the low level AudioToolbox.framework.
Luckily, others have already solved this problem for you. Here's some code I simplified slightly to be straight C functions, from CocoaDev. You need to link to the AudioToolbox to compile this code (see here for documentation on how to do so).
#import <AudioToolbox/AudioServices.h>
AudioDeviceID getDefaultOutputDeviceID()
{
AudioDeviceID outputDeviceID = kAudioObjectUnknown;
// get the default output device
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mScope = kAudioObjectPropertyScopeGlobal;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
if (!AudioHardwareServiceHasProperty(kAudioObjectSystemObject, &propertyAOPA))
{
printf("Cannot find default output device!");
return outputDeviceID;
}
status = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject, &propertyAOPA, 0, NULL, (UInt32[]){sizeof(AudioDeviceID)}, &outputDeviceID);
if (status != 0)
{
printf("Cannot find default output device!");
}
return outputDeviceID;
}
float getVolume ()
{
Float32 outputVolume;
OSStatus status = noErr;
AudioObjectPropertyAddress propertyAOPA;
propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;
AudioDeviceID outputDeviceID = getDefaultOutputDeviceID();
if (outputDeviceID == kAudioObjectUnknown)
{
printf("Unknown device");
return 0.0;
}
if (!AudioHardwareServiceHasProperty(outputDeviceID, &propertyAOPA))
{
printf("No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
status = AudioHardwareServiceGetPropertyData(outputDeviceID, &propertyAOPA, 0, NULL, (UInt32[]){sizeof(Float32)}, &outputVolume);
if (status)
{
printf("No volume returned for device 0x%0x", outputDeviceID);
return 0.0;
}
if (outputVolume < 0.0 || outputVolume > 1.0) return 0.0;
return outputVolume;
}
int main (int argc, char const *argv[])
{
printf("%f", getVolume());
return 0;
}
Note that there's also a setVolume function there, too.
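For reference, a minimal sketch of what that setter looks like using the same AudioHardwareService API (adapted rather than copied from CocoaDev; the clamping is an assumption):
void setVolume(Float32 newVolume)
{
    // Clamp to the valid 0.0 .. 1.0 range before writing.
    if (newVolume < 0.0) newVolume = 0.0;
    if (newVolume > 1.0) newVolume = 1.0;

    AudioObjectPropertyAddress propertyAOPA;
    propertyAOPA.mElement = kAudioObjectPropertyElementMaster;
    propertyAOPA.mSelector = kAudioHardwareServiceDeviceProperty_VirtualMasterVolume;
    propertyAOPA.mScope = kAudioDevicePropertyScopeOutput;

    AudioDeviceID outputDeviceID = getDefaultOutputDeviceID();
    if (outputDeviceID == kAudioObjectUnknown)
    {
        printf("Unknown device");
        return;
    }
    OSStatus status = AudioHardwareServiceSetPropertyData(outputDeviceID, &propertyAOPA,
                                                          0, NULL, sizeof(Float32), &newVolume);
    if (status) printf("Could not set volume for device 0x%0x", outputDeviceID);
}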