How to badge file and folder using Cocoa - objective-c

I want to badge a file and a folder with some color (an image). How can this be achieved?
I tried Icon Services, and it works for files, but it is not working for folders.
I have seen this behavior working in Dropbox (on 10.4, 10.5 and 10.6). How can this be done?
The blog post Cocoa Tutorial: Custom Folder Icons came very close for me, but it was not working as expected.
Is there another solution besides Icon Services?

The following function is the solution I found for the problem:
BOOL AddBadgeToItem(NSString* path, NSData* tag)
{
    FSCatalogInfo info;
    FSRef par;
    FSRef ref;
    Boolean dir = false;
    if (tag && (FSPathMakeRef((const UInt8 *)[path fileSystemRepresentation], &par, &dir) == noErr))
    {
        HFSUniStr255 fork = {0, {0}};
        SInt16 refnum = kResFileNotOpened;
        FSGetResourceForkName(&fork);
        if (dir)
        {
            NSString *name = @"Icon\r";
            memset(&info, 0, sizeof(info));
            ((FileInfo*)(&info.finderInfo))->finderFlags = kIsInvisible;
            // nameLength is in UniChars, so use -length rather than a byte count
            OSErr error = FSCreateResourceFile(&par, [name length], (UniChar*)[name cStringUsingEncoding:NSUTF16LittleEndianStringEncoding], kFSCatInfoFinderXInfo, &info, fork.length, fork.unicode, &ref, NULL);
            if (error == dupFNErr)
            {
                // File already exists; prepare to try to open it
                const char *iconFileSystemPath = [[path stringByAppendingPathComponent:@"Icon\r"] fileSystemRepresentation];
                OSStatus status = FSPathMakeRef((const UInt8 *)iconFileSystemPath, &ref, NULL);
                if (status != noErr)
                {
                    fprintf(stderr, "error: FSPathMakeRef() returned %d for file \"%s\"\n", (int)status, iconFileSystemPath);
                }
            }
            else if (error != noErr)
            {
                return NO;
            }
        }
        else
        {
            BlockMoveData(&par, &ref, sizeof(FSRef));
            if (FSCreateResourceFork(&ref, fork.length, fork.unicode, 0) != noErr)
            {
                // The fork may already exist; open it and clear the stale badge flag
                if (FSOpenResourceFile(&ref, fork.length, fork.unicode, fsRdWrPerm, &refnum) != noErr) {
                    return NO;
                }
                if (refnum != kResFileNotOpened) {
                    UpdateResFile(refnum);
                    CloseResFile(refnum);
                    if (FSGetCatalogInfo(&par, kFSCatInfoFinderXInfo, &info, NULL, NULL, NULL) == noErr) {
                        ((ExtendedFileInfo*)(&info.extFinderInfo))->extendedFinderFlags = kExtendedFlagsAreInvalid;
                        FSSetCatalogInfo(&par, kFSCatInfoFinderXInfo, &info);
                    }
                }
                return NO;
            }
        }
        OSErr err = FSOpenResourceFile(&ref, fork.length, fork.unicode, fsRdWrPerm, &refnum);
        if (err != noErr) {
            return NO;
        }
        if (refnum != kResFileNotOpened) {
            CustomBadgeResource* cbr;
            int len = (int)[tag length];
            Handle h = NewHandle(len);
            if (h) {
                BlockMoveData([tag bytes], *h, len);
                AddResource(h, kIconFamilyType, 128, "\p");
                WriteResource(h);
                ReleaseResource(h);
            }
            h = NewHandle(sizeof(CustomBadgeResource));
            if (h) {
                cbr = (CustomBadgeResource*)*h;
                memset(cbr, 0, sizeof(CustomBadgeResource));
                cbr->version = kCustomBadgeResourceVersion;
                cbr->customBadgeResourceID = 128;
                AddResource(h, kCustomBadgeResourceType, kCustomBadgeResourceID, "\p");
                WriteResource(h);
                ReleaseResource(h);
            }
            UpdateResFile(refnum);
            CloseResFile(refnum);
            if (FSGetCatalogInfo(&par, kFSCatInfoFinderXInfo, &info, NULL, NULL, NULL) == noErr) {
                ((ExtendedFileInfo*)(&info.extFinderInfo))->extendedFinderFlags = kExtendedFlagHasCustomBadge;
                FSSetCatalogInfo(&par, kFSCatInfoFinderXInfo, &info);
            }
            return YES;
        }
    }
    return NO;
}
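For illustration, the function expects the raw bytes of an icon family ("icns") resource as the tag; a hypothetical call site (file and path names invented) would look like this:

// Hypothetical usage: badge a folder with the contents of an .icns file.
NSData *badgeData = [NSData dataWithContentsOfFile:
    [[NSBundle mainBundle] pathForResource:@"badge" ofType:@"icns"]];
if (!AddBadgeToItem(@"/Users/me/Documents/Tagged", badgeData)) {
    NSLog(@"badging failed");
}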

You can do this using the -setIcon:forFile:options: method on NSWorkspace, which lets you just specify an NSImage to apply to the file/folder at the path you give it.
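For example, a minimal sketch (the badge image and path are placeholders; note that -setIcon:forFile:options: replaces the entire icon, so compositing the badge over the item's current icon is up to you):

// Minimal sketch: draw a badge over the item's current icon and re-apply it.
void BadgeItemAtPath(NSString *path, NSImage *badge)
{
    NSWorkspace *ws = [NSWorkspace sharedWorkspace];
    NSImage *icon = [ws iconForFile:path];
    NSSize size = [icon size];

    NSImage *composite = [[NSImage alloc] initWithSize:size];
    [composite lockFocus];
    [icon drawInRect:NSMakeRect(0, 0, size.width, size.height)
            fromRect:NSZeroRect
           operation:NSCompositeSourceOver
            fraction:1.0];
    // Draw the badge in the bottom-left quadrant, Dropbox-style.
    [badge drawInRect:NSMakeRect(0, 0, size.width / 2, size.height / 2)
             fromRect:NSZeroRect
            operation:NSCompositeSourceOver
             fraction:1.0];
    [composite unlockFocus];

    [ws setIcon:composite forFile:path options:0]; // pass nil as the image to restore the default icon
    [composite release];
}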

The intended approach is to create a Finder Sync app extension.
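For reference, that applies to OS X 10.10 and later. A minimal sketch of the extension's principal class (the watched path, badge identifier and label are illustrative):

#import <FinderSync/FinderSync.h>

@interface MyFinderSync : FIFinderSync
@end

@implementation MyFinderSync

- (instancetype)init {
    self = [super init];
    if (self) {
        FIFinderSyncController *controller = [FIFinderSyncController defaultController];
        // Watch the tree whose items should be badged (path is illustrative).
        controller.directoryURLs = [NSSet setWithObject:
            [NSURL fileURLWithPath:@"/Users/me/Synced"]];
        // Register a badge image under an identifier of your choosing.
        [controller setBadgeImage:[NSImage imageNamed:NSImageNameStatusAvailable]
                            label:@"Synced"
               forBadgeIdentifier:@"synced"];
    }
    return self;
}

// Finder asks for a badge for each item it displays.
- (void)requestBadgeIdentifierForURL:(NSURL *)url {
    [[FIFinderSyncController defaultController] setBadgeIdentifier:@"synced"
                                                            forURL:url];
}

@end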


Record rtsp stream with ffmpeg in iOS

I've followed iFrameExtractor to successfully stream RTSP in my Swift project. The project also has a recording function: it basically uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an mp4 file.
When I use this project on my device, the RTSP streaming works fine, but the recording function always generates a blank mp4 file with no image and no sound.
Could anyone tell me what step I am missing?
I'm using an iPhone 5 with iOS 9.1 and Xcode 7.1.1.
The FFmpeg version is 2.8.3, compiled following the instructions in CompilationGuide – FFmpeg.
The sample code from the project follows.
The function that generates every frame:
-(BOOL)stepFrame {
    // AVPacket packet;
    int frameFinished = 0;
    static bool bFirstIFrame = false;
    static int64_t vPTS = 0, vDTS = 0, vAudioPTS = 0, vAudioDTS = 0;

    while (!frameFinished && av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // 20130525 albert.liao modified start
            // Initialize a new format context for writing the file
            if (veVideoRecordState != eH264RecIdle)
            {
                switch (veVideoRecordState)
                {
                    case eH264RecInit:
                    {
                        if (!pFormatCtx_Record)
                        {
                            int bFlag = 0;
                            //NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
                            NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";
                            const char *file = [videoPath UTF8String];
                            pFormatCtx_Record = avformat_alloc_context();
                            bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx, /*fps*/0.0, packet.data, packet.size);
                            if (bFlag == true)
                            {
                                veVideoRecordState = eH264RecActive;
                                fprintf(stderr, "h264_file_create success\n");
                            }
                            else
                            {
                                veVideoRecordState = eH264RecIdle;
                                fprintf(stderr, "h264_file_create error\n");
                            }
                        }
                    }
                    //break;
                    case eH264RecActive:
                    {
                        if ((bFirstIFrame == false) && (packet.flags & AV_PKT_FLAG_KEY) == AV_PKT_FLAG_KEY)
                        {
                            bFirstIFrame = true;
                            vPTS = packet.pts;
                            vDTS = packet.dts;
#if 0
                            NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
                            [pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
                            [NSTimer scheduledTimerWithTimeInterval:5.0 //2.0
                                                             target:self
                                                           selector:@selector(StopRecording:)
                                                           userInfo:nil
                                                            repeats:NO];
#endif
                        }
                        // Record audio once the 1st I-frame is obtained
                        if (bFirstIFrame == true)
                        {
                            if (pFormatCtx_Record)
                            {
#if PTS_DTS_IS_CORRECT == 1
                                packet.pts = packet.pts - vPTS;
                                packet.dts = packet.dts - vDTS;
#endif
                                h264_file_write_frame(pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
                            }
                            else
                            {
                                NSLog(@"pFormatCtx_Record does not exist");
                            }
                        }
                    }
                    break;
                    case eH264RecClose:
                    {
                        if (pFormatCtx_Record)
                        {
                            h264_file_close(pFormatCtx_Record);
#if 0
                            // 20130607 Test
                            dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
                            {
                                ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
                                NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
                                NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
                                if (1) // ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
                                {
                                    [library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error) {
                                        if (error) {
                                            // TODO: error handling
                                            NSLog(@"writeVideoAtPathToSavedPhotosAlbum error");
                                        } else {
                                            // TODO: success handling
                                            NSLog(@"writeVideoAtPathToSavedPhotosAlbum success");
                                        }
                                    }];
                                }
                                [library release];
                            });
#endif
                            vPTS = 0;
                            vDTS = 0;
                            vAudioPTS = 0;
                            vAudioDTS = 0;
                            pFormatCtx_Record = NULL;
                            NSLog(@"h264_file_close() is finished");
                        }
                        else
                        {
                            NSLog(@"fc does not exist");
                        }
                        bFirstIFrame = false;
                        veVideoRecordState = eH264RecIdle;
                    }
                    break;
                    default:
                        if (pFormatCtx_Record)
                        {
                            h264_file_close(pFormatCtx_Record);
                            pFormatCtx_Record = NULL;
                        }
                        NSLog(@"[ERROR] unexpected veVideoRecordState!!");
                        veVideoRecordState = eH264RecIdle;
                        break;
                }
            }
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        }
        else if (packet.stream_index == audioStream)
        {
            // 20131024 albert.liao modified start
            static int vPktCount = 0;
            BOOL bIsAACADTS = FALSE;
            int ret = 0;

            if (aPlayer.vAACType == eAAC_UNDEFINED)
            {
                tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
                bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
            }

            @synchronized(aPlayer)
            {
                if (aPlayer == nil)
                {
                    aPlayer = [[AudioPlayer alloc] initAudio:nil withCodecCtx:(AVCodecContext *)pAudioCodecCtx];
                    NSLog(@"aPlayer initAudio");
                    if (bIsAACADTS)
                    {
                        aPlayer.vAACType = eAAC_ADTS;
                        //NSLog(@"is ADTS AAC");
                    }
                }
                else
                {
                    if (vPktCount < 5) // The audio is heard once the image is rendered
                    {
                        vPktCount++;
                    }
                    else
                    {
                        if ([aPlayer getStatus] != eAudioRunning)
                        {
                            dispatch_async(dispatch_get_main_queue(), ^(void) {
                                @synchronized(aPlayer)
                                {
                                    NSLog(@"aPlayer start play");
                                    [aPlayer Play];
                                }
                            });
                        }
                    }
                }
            }

            @synchronized(aPlayer)
            {
                int ret = 0;
                ret = [aPlayer putAVPacket:&packet];
                if (ret <= 0)
                    NSLog(@"Put Audio Packet Error!!");
            }
            // 20131024 albert.liao modified end

            if (bFirstIFrame == true)
            {
                switch (veVideoRecordState)
                {
                    case eH264RecActive:
                    {
                        if (pFormatCtx_Record)
                        {
                            h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
                        }
                        else
                        {
                            NSLog(@"pFormatCtx_Record does not exist");
                        }
                    }
                }
            }
        }
        else
        {
            //fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",
            //        packet.size, packet.data[0], packet.data[1], packet.data[2], packet.data[3], packet.data[4]);
        }
        // 20130525 albert.liao modified end
    }
    return frameFinished != 0;
}
avformat_write_header:
int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx, AVCodecContext *pAudioCodecCtx, double fps, void *p, int len)
{
    int vRet = 0;
    AVOutputFormat *of = NULL;
    AVStream *pst = NULL;
    AVCodecContext *pcc = NULL, *pAudioOutputCodecContext = NULL;

    avcodec_register_all();
    av_register_all();
    av_log_set_level(AV_LOG_VERBOSE);

    if (!pFilePath)
    {
        fprintf(stderr, "FilePath does not exist");
        return -1;
    }
    if (!fc)
    {
        fprintf(stderr, "AVFormatContext does not exist");
        return -1;
    }
    fprintf(stderr, "file=%s\n", pFilePath);

    // Create container
    of = av_guess_format(0, pFilePath, 0);
    fc->oformat = of;
    strcpy(fc->filename, pFilePath);

    // Add video stream
    pst = avformat_new_stream(fc, 0);
    vVideoStreamIdx = pst->index;
    fprintf(stderr, "Video Stream:%d", vVideoStreamIdx);
    pcc = pst->codec;
    avcodec_get_context_defaults3(pcc, AVMEDIA_TYPE_VIDEO);

    // Save the stream with the original settings, without converting
    pcc->codec_type = pCodecCtx->codec_type;
    pcc->codec_id = pCodecCtx->codec_id;
    pcc->bit_rate = pCodecCtx->bit_rate;
    pcc->width = pCodecCtx->width;
    pcc->height = pCodecCtx->height;

    if (fps == 0)
    {
        double fps = 0.0;
        AVRational pTimeBase;
        pTimeBase.num = pCodecCtx->time_base.num;
        pTimeBase.den = pCodecCtx->time_base.den;
        fps = 1.0 / av_q2d(pCodecCtx->time_base) / FFMAX(pCodecCtx->ticks_per_frame, 1);
        fprintf(stderr, "fps_method(tbc): 1/av_q2d()=%g", fps);
        pcc->time_base.num = 1;
        pcc->time_base.den = fps;
    }
    else
    {
        pcc->time_base.num = 1;
        pcc->time_base.den = fps;
    }

    // reference ffmpeg\libavformat\utils.c
    // For SPS and PPS in the avcC container
    pcc->extradata = malloc(sizeof(uint8_t) * pCodecCtx->extradata_size);
    memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
    pcc->extradata_size = pCodecCtx->extradata_size;

    // For the audio stream
    if (pAudioCodecCtx)
    {
        AVCodec *pAudioCodec = NULL;
        AVStream *pst2 = NULL;
        pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);

        // Add audio stream
        pst2 = avformat_new_stream(fc, pAudioCodec);
        vAudioStreamIdx = pst2->index;
        pAudioOutputCodecContext = pst2->codec;
        avcodec_get_context_defaults3(pAudioOutputCodecContext, pAudioCodec);
        fprintf(stderr, "Audio Stream:%d", vAudioStreamIdx);
        fprintf(stderr, "pAudioCodecCtx->bits_per_coded_sample=%d", pAudioCodecCtx->bits_per_coded_sample);

        pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
        pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;

        // Copy the codec attributes
        pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
        pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
        pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
        pAudioOutputCodecContext->bit_rate = 12000; //pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
        pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
        pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
        //FF_PROFILE_AAC_LOW;
        // pAudioCodecCtx->bit_rate;

        // AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
        //pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP; //pAudioCodecCtx->sample_fmt;
        pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
        //pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;

        pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;
        pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
        pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
        pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
        pAudioOutputCodecContext->frame_size = 1024;

        fprintf(stderr, "profile:%d, sample_rate:%d, channels:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);

        AVDictionary *opts = NULL;
        av_dict_set(&opts, "strict", "experimental", 0);
        if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
            fprintf(stderr, "\ncould not open codec\n");
        }
        av_dict_free(&opts);

#if 0
        // For audio, this part is not needed
        if (pAudioCodecCtx->extradata_size != 0)
        {
            NSLog(@"extradata_size !=0");
            pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t) * pAudioCodecCtx->extradata_size);
            memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
            pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
        }
        else
        {
            // For WMA test only
            pAudioOutputCodecContext->extradata_size = 0;
            NSLog(@"extradata_size ==0");
        }
#endif
    }

    if (fc->oformat->flags & AVFMT_GLOBALHEADER)
    {
        pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
        pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    if (!(fc->oformat->flags & AVFMT_NOFILE))
    {
        vRet = avio_open(&fc->pb, fc->filename, AVIO_FLAG_WRITE);
        if (vRet != 0)
        {
            fprintf(stderr, "avio_open(%s) error", fc->filename);
        }
    }

    // dump format in console
    av_dump_format(fc, 0, pFilePath, 1);

    vRet = avformat_write_header(fc, NULL);
    if (vRet == 0)
        return 1;
    else
        return 0;
}
av_interleaved_write_frame:
void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void *p, int len, int64_t dts, int64_t pts)
{
    AVStream *pst = NULL;
    AVPacket pkt;

    if (0 > vVideoStreamIdx)
        return;

    // may be audio or video
    pst = fc->streams[vStreamIdx];

    // Init packet
    av_init_packet(&pkt);

    if (vStreamIdx == vVideoStreamIdx)
    {
        pkt.flags |= (0 >= getVopType(p, len)) ? AV_PKT_FLAG_KEY : 0;
        //pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = pst->index;
        pkt.data = (uint8_t *)p;
        pkt.size = len;
        pkt.dts = AV_NOPTS_VALUE;
        pkt.pts = AV_NOPTS_VALUE;
        // TODO: mark or unmark the log
        //fprintf(stderr, "dts=%lld, pts=%lld\n", dts, pts);
        // av_write_frame( fc, &pkt );
    }
    av_interleaved_write_frame(fc, &pkt);
}
av_write_trailer:
void h264_file_close(AVFormatContext *fc)
{
    if (!fc)
        return;

    av_write_trailer(fc);

    if (fc->oformat && !(fc->oformat->flags & AVFMT_NOFILE) && fc->pb)
        avio_close(fc->pb);

    av_free(fc);
}
Thanks.
It looks like you're using the same AVFormatContext for both the input and the output?
In the line
pst = fc->streams[ vStreamIdx ];
you're assigning the AVStream* from the AVFormatContext connected to your input (the RTSP stream). But later on you're trying to write the packet back to the same context with av_interleaved_write_frame( fc, &pkt );. I kind of think of a context as a file, which has helped me navigate this type of thing better. I do something identical to what you're doing (not on iOS though), where I use a separate AVFormatContext for each of the input (RTSP stream) and the output (mp4 file). If I'm correct, I think what you need to do is initialize a second AVFormatContext for the output and write to that.
The following code (without error checking on everything) is what I do to take an AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context that I had associated with the RTSP stream, and write from one to the other. This is after I have fetched a packet, etc., which in your case it looks like you're populating (I just take the packet from av_read_frame and re-package it).
This is code that could be in your write frame function (it also includes the writing of the header).
AVFormatContext *output_format_context;
AVStream *in_stream_2;
AVStream *out_stream_2;

// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());

// Point to the AVOutputFormat for manipulation
AVOutputFormat *output_format = output_format_context->oformat;

// Loop through all streams
for (int i = 0; i < input_format_context->nb_streams; i++) {
    // Create a pointer to the input stream that was allocated earlier in the code
    AVStream *in_stream = input_format_context->streams[i];

    // Create a pointer to a new stream that will be part of the output
    AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);

    // Set time_base of the new output stream to equal the input stream's, since I'm not changing anything (can avoid this but get a deprecation warning)
    out_stream->time_base = in_stream->time_base;

    // This is the non-deprecated way of copying all the parameters from the input stream into the output stream, since everything stays the same
    avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);

    // I don't remember what this is for :)
    out_stream->codec->codec_tag = 0;

    // This just sets a flag from the format context on the stream relating to the header
    if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
        out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

// Check the NOFILE flag and open the output file context (previously the output file was only associated with the format context; now it is actually opened)
if (!(output_format->flags & AVFMT_NOFILE))
    avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);

// Write the header (not sure if this is always needed, but for h264 I believe it is)
avformat_write_header(output_format_context, NULL);

// Re-get the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];

// Rescale pts and dts, duration and pos - you would do this as you need in your code
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;

// The first packet of my stream always gives me negative dts/pts, so this just protects that first one for my purposes. You probably don't need it.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;

// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);

// ....
// Write trailer, close/cleanup... etc
// ....
This code is fairly bare-bones and doesn't include the setup (which it sounds like you're doing correctly anyway). I would also imagine this code could be cleaned up and tweaked for your purposes, but it works for me to re-write the RTSP stream into a file (in my case many files, but that code is not shown).
The code is C/C++ code, so you might need minor tweaks to make it Swift compatible (for some of the library function calls, maybe). I think overall it should be compatible though.
Hopefully this helps point you in the right direction. This was cobbled together thanks to several sample code sources (I don't remember where), along with warning prompts from the libraries themselves.

How do I recursively copy a directory using vala?

I'm new to Vala, so this may be a stupid question.
According to #vala on gimpnet, it is not possible to recursively copy directories using GLib.File.copy. At the moment I am using:
Posix.system("cp -r absolutesource absolutedestination")
Is there a better method of doing this?
As I told you in IRC, you can just write a function to do it yourself by calling GLib.File.copy for each file you want to copy. Here is a basic example:
public bool copy_recursive (GLib.File src, GLib.File dest, GLib.FileCopyFlags flags = GLib.FileCopyFlags.NONE, GLib.Cancellable? cancellable = null) throws GLib.Error {
    GLib.FileType src_type = src.query_file_type (GLib.FileQueryInfoFlags.NONE, cancellable);
    if ( src_type == GLib.FileType.DIRECTORY ) {
        dest.make_directory (cancellable);
        src.copy_attributes (dest, flags, cancellable);

        string src_path = src.get_path ();
        string dest_path = dest.get_path ();
        GLib.FileEnumerator enumerator = src.enumerate_children (GLib.FileAttribute.STANDARD_NAME, GLib.FileQueryInfoFlags.NONE, cancellable);
        for ( GLib.FileInfo? info = enumerator.next_file (cancellable) ; info != null ; info = enumerator.next_file (cancellable) ) {
            copy_recursive (
                GLib.File.new_for_path (GLib.Path.build_filename (src_path, info.get_name ())),
                GLib.File.new_for_path (GLib.Path.build_filename (dest_path, info.get_name ())),
                flags,
                cancellable);
        }
    } else if ( src_type == GLib.FileType.REGULAR ) {
        src.copy (dest, flags, cancellable);
    }

    return true;
}
Also, it's worth noting that you might want to use one of the functions in GLib.Process instead of Posix.system.
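For example, a minimal sketch using GLib.Process.spawn_sync (the paths are placeholders), which surfaces failures as a SpawnError and an exit status instead of the opaque return value of Posix.system:

public static int main (string[] args) {
    try {
        int exit_status;
        string[] argv = { "cp", "-r", "/tmp/absolutesource", "/tmp/absolutedestination" };
        // Spawn "cp -r" through GLib instead of Posix.system.
        GLib.Process.spawn_sync (null, argv, null,
                                 GLib.SpawnFlags.SEARCH_PATH, null,
                                 null, null, out exit_status);
        return exit_status;
    } catch (GLib.SpawnError e) {
        GLib.warning ("spawn failed: %s", e.message);
        return 1;
    }
}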
I came across this question looking for something similar in C using GLib/GIO.
Here's my attempt at converting the Vala code above to C:
gboolean
copy_recursive(GFile *src, GFile *dest, GFileCopyFlags flags, GCancellable *cancellable, GError **error) {
    GFileType src_type = g_file_query_file_type(src, G_FILE_QUERY_INFO_NONE, cancellable);
    if (src_type == G_FILE_TYPE_DIRECTORY) {
        g_file_make_directory(dest, cancellable, error);
        g_file_copy_attributes(src, dest, flags, cancellable, error);

        GFileEnumerator *enumerator = g_file_enumerate_children(src, G_FILE_ATTRIBUTE_STANDARD_NAME, G_FILE_QUERY_INFO_NONE, cancellable, error);
        for (GFileInfo *info = g_file_enumerator_next_file(enumerator, cancellable, error); info != NULL; info = g_file_enumerator_next_file(enumerator, cancellable, error)) {
            const char *relative_path = g_file_info_get_name(info);
            copy_recursive(
                g_file_resolve_relative_path(src, relative_path),
                g_file_resolve_relative_path(dest, relative_path),
                flags, cancellable, error);
        }
    } else if (src_type == G_FILE_TYPE_REGULAR) {
        g_file_copy(src, dest, flags, cancellable, NULL, NULL, error);
    }

    return TRUE;
}
As a bonus, here's a function that recursively deletes a directory
gboolean
delete_recursive(GFile *file, GCancellable *cancellable, GError **error) {
    GFileType file_type = g_file_query_file_type(file, G_FILE_QUERY_INFO_NONE, cancellable);
    if (file_type == G_FILE_TYPE_DIRECTORY) {
        GFileEnumerator *enumerator = g_file_enumerate_children(file, G_FILE_ATTRIBUTE_STANDARD_NAME, G_FILE_QUERY_INFO_NONE, cancellable, error);
        for (GFileInfo *info = g_file_enumerator_next_file(enumerator, cancellable, error); info != NULL; info = g_file_enumerator_next_file(enumerator, cancellable, error)) {
            delete_recursive(
                g_file_resolve_relative_path(file, g_file_info_get_name(info)),
                cancellable, error);
        }
        g_file_delete(file, cancellable, error);
    } else if (file_type == G_FILE_TYPE_REGULAR) {
        g_file_delete(file, cancellable, error);
    }

    return TRUE;
}
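For what it's worth, a hypothetical call site (paths invented) would look like this; note that both helpers always return TRUE, so the GError out-parameter is the thing to inspect:

GError *error = NULL;
GFile *src = g_file_new_for_path("/tmp/source-dir");
GFile *dest = g_file_new_for_path("/tmp/dest-dir");
copy_recursive(src, dest, G_FILE_COPY_NONE, NULL, &error);
if (error != NULL) {
    g_printerr("copy failed: %s\n", error->message);
    g_clear_error(&error);
}
g_object_unref(src);
g_object_unref(dest);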

Trying to upload file using CFHTTPMessage

I am facing an issue with uploading a file using HTTP file upload. The protocol agreed between client and server is that the HTTP request headers for the upload transaction go first, followed by a stream containing the data of the request XML, followed by the data of the file buffer.
The request headers reach the server, and the server, after reading the headers, starts reading the stream. It gets the request XML from the stream as well.
After this, the server reads the file data from the stream. After the file upload shows done, I try to download the same file. On the server, I see the file is there and it shows the correct size too.
After I download the file, I find the file is cropped at the end (in the case of a jpg file) or some trailing text is missing (in the case of a txt file).
According to my project's logic, data is transferred in 8 KB chunks. I have verified that the last chunk, which is smaller than 8 KB, is the missing data causing the cropping of images. I have also verified that the last chunk has valid data in the buffer produced by the fileHandler API. I am including a few code snippets which I have used for the write logic.
Can you identify any particular reason for the last chunk not getting streamed, or for junk data getting streamed in place of the last chunk?
Code snippet for creating the writestream:
const UInt8 *uploadBuf;
uploadBuf = (const UInt8 *)malloc(totLen);

CFReadStreamRef readStreamForBody = CFReadStreamCreateWithBytesNoCopy(kCFAllocatorDefault, uploadBuf, (CFIndex)totLen, kCFAllocatorNull);
CFReadStreamRef readStreamForUpload = CFReadStreamCreateForStreamedHTTPRequest(kCFAllocatorDefault, messageRef, readStreamForBody);
if (CFReadStreamOpen(readStreamForUpload) == TRUE)
{
    CFWriteStreamRef writeStream = CFWriteStreamCreateWithBuffer(kCFAllocatorDefault, (UInt8 *)uploadBuf, (CFIndex)totLen);
    if (CFWriteStreamOpen(writeStream) == FALSE)
    {
        CFErrorRef streamError = CFWriteStreamCopyError(writeStream);
        CFStringRef streamErrorString = CFErrorCopyDescription(streamError);
        CFShow(streamErrorString);
        CFRelease(streamError);
        CFRelease(streamErrorString);
        CFWriteStreamClose(writeStream);
        CFRelease(writeStream);
        writeStream = NULL;
    }
    else
    {
        Boolean hasBytes = FALSE;
        CFStreamStatus streamStatus = CFWriteStreamGetStatus(writeStream);
        do
        {
            while ((hasBytes == FALSE) &&
                   ((streamStatus != kCFStreamStatusNotOpen) &&
                    (streamStatus != kCFStreamStatusAtEnd) &&
                    (streamStatus != kCFStreamStatusClosed) &&
                    (streamStatus != kCFStreamStatusError)))
            {
                CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
                hasBytes = CFWriteStreamCanAcceptBytes(writeStream);
                streamStatus = CFWriteStreamGetStatus(writeStream);
            }
        } while (hasBytes != TRUE);
Code snippet for saving the writestream:
[self setWriteStreamForFile:writeStream];
[[MyHTTPLastRequestList lastRequestList] setObject:self forKey:[self sessionKey]];
Code snippet for writing the buffer to the writestream:
- (void)write:(const void *)fileBuffer numBytes:(NSUInteger)fileBufSize
{
    MyThreadSafeDictionary *lastRequestList = [MyHTTPLastRequestList lastRequestList];
    NSString *sessionKey = [self sessionKey];
    BOOL noResponse = YES;
    while (noResponse)
        noResponse = ([lastRequestList objectForKey:sessionKey] == nil);

    MyHTTPMessage *httpFileUploadMsg = (MyHTTPMessage *)[lastRequestList objectForKey:sessionKey];
    CFIndex totalLen = [httpFileUploadMsg totalLength];
    CFIndex sentLen = [httpFileUploadMsg sentLength];
    NSData *chunkData = [[[NSData dataWithBytes:fileBuffer length:fileBufSize] retain] autorelease];
    sentLen += (CFIndex)fileBufSize;
    CFIndex bytesRemaining = totalLen - sentLen;
    CFIndex bytesWritten = 0;
    CFWriteStreamRef writeStream = [httpFileUploadMsg writeStreamForFile];

    if (bytesRemaining > 0)
    {
        CFWriteStreamRef writeStream = [httpFileUploadMsg writeStreamForFile];
        bytesWritten = CFWriteStreamWrite(writeStream, (const UInt8 *)[chunkData bytes], (CFIndex)fileBufSize);
        [httpFileUploadMsg setSentLength:sentLen];
        NSLog(@"Bytes written to stream = %ld", (long)sentLen);
        [httpFileUploadMsg setWriteStreamForFile:writeStream];
    }
    else
    {
        @try
        {
            bytesWritten = CFWriteStreamWrite(writeStream, (const UInt8 *)[chunkData bytes], (CFIndex)fileBufSize);
            if (bytesWritten == 0)
            {
                NSLog(@"ERROR :: Stream filled to capacity");
            }
            else if (bytesWritten < 0)
            {
                NSLog(@"ERROR :: Write failed.");
                CFErrorRef streamError = CFWriteStreamCopyError(writeStream);
                CFStringRef streamErrorString = CFErrorCopyDescription(streamError);
                CFShow(streamErrorString);
                CFRelease(streamError);
                CFRelease(streamErrorString);
            }
            else
            {
                NSLog(@"Write Success");
            }
        }
        @catch (NSException *localException)
        {
            NSLog(@"Exception raised - %@", [localException name]);
            [localException raise];
        }

        NSLog(@"Sending to the server");
        CFReadStreamRef readStreamForUpload = [httpFileUploadMsg readStreamForHTTPResponse];
        CFStreamStatus rStreamStatus = CFReadStreamGetStatus(readStreamForUpload);
        Boolean parentStreamHasBytes = FALSE;
        do
        {
            while ((parentStreamHasBytes == FALSE) &&
                   ((rStreamStatus != kCFStreamStatusNotOpen) &&
                    (rStreamStatus != kCFStreamStatusAtEnd) &&
                    (rStreamStatus != kCFStreamStatusClosed) &&
                    (rStreamStatus != kCFStreamStatusError)))
            {
                CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0, false);
                parentStreamHasBytes = CFReadStreamHasBytesAvailable(readStreamForUpload);
                rStreamStatus = CFReadStreamGetStatus(readStreamForUpload);
            }
        } while (parentStreamHasBytes != TRUE);
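One general CFStream caveat worth checking against logic like the above (an observation, not a verified diagnosis of these snippets): CFWriteStreamWrite may accept fewer bytes than requested, and the code stores bytesWritten without ever comparing it to fileBufSize. If the final sub-8 KB chunk is only partially accepted, its tail is silently dropped, which would match the truncation described. A minimal sketch of a write loop that retries until the whole chunk is consumed:

// Sketch: keep calling CFWriteStreamWrite until the whole chunk has been
// accepted, since a single call may consume only part of it.
static BOOL WriteChunkFully(CFWriteStreamRef stream, const UInt8 *bytes, CFIndex length)
{
    CFIndex offset = 0;
    while (offset < length)
    {
        if (!CFWriteStreamCanAcceptBytes(stream)) {
            // Give the run loop time until the stream can take more data.
            CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
            continue;
        }
        CFIndex written = CFWriteStreamWrite(stream, bytes + offset, length - offset);
        if (written <= 0) {
            return NO; // stream full (0) or error (<0); caller inspects status
        }
        offset += written;
    }
    return YES;
}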

How to write a custom FindElement routine in Selenium?

I'm trying to figure out how to write a custom FindElement routine in Selenium 2.0 WebDriver. The idea would be something like this:
driver.FindElement(By.Method( (ISearchContext) => {
/* examine search context logic here... */ }));
The anonymous method would examine the ISearchContext and return True if it matches; False otherwise.
I'm digging through the Selenium code, and getting a bit lost. It looks like the actual By.* logic is carried out server-side, not client side. That seems to be complicating matters.
Any suggestions?
I do a multi-staged search. I have a method that performs a try/catch and then a method that gets the element. In theory you could do a try/catch directly in the until-loop instead, but I like this way better because of my setup.
public bool CheckUntil(IWebDriver driver, string selectorType, string selectorInfo)
{
    int Timer = 160;
    bool itemFound = false;

    for (int i = 0; i < Timer; i++)
    {
        if (itemFound)
        {
            break; // stop polling once the element has been found
        }
        else
        {
            Thread.Sleep(500);
            if (selectorType.ToLower() == "id" && TryCatch(driver, selectorType, selectorInfo))
            {
                if (driver.FindElement(By.Id(selectorInfo)).Displayed)
                {
                    itemFound = true;
                }
            }
            else if (selectorType.ToLower() == "tagname" && TryCatch(driver, selectorType, selectorInfo))
            {
                if (driver.FindElement(By.TagName(selectorInfo)).Displayed)
                {
                    itemFound = true;
                }
            }
        }
    }
    return itemFound;
}
Here's my try/catch method. You can add as many different selector types as you want: id, cssselector, xpath, tagname, classname, etc.
public bool TryCatch(IWebDriver driver, string selectorType, string selectorInfo)
{
    bool ElementFound = false;
    try
    {
        switch (selectorType)
        {
            case "id":
                driver.FindElement(By.Id(selectorInfo));
                break;
            case "tagname":
                driver.FindElement(By.TagName(selectorInfo));
                break;
        }
        ElementFound = true;
    }
    catch
    {
        ElementFound = false;
    }
    return ElementFound;
}
Ok, I figured out how to do this. I'm leveraging driver.ExecuteScript() to run custom js on the webdriver. It looks a bit like this:
function elementFound(elem) {
    var nodeType = navigator.appName == "Microsoft Internet Explorer" ?
        document.ELEMENT_NODE : Node.ELEMENT_NODE;
    if (elem.nodeType == nodeType)
    {
        /* Element identification logic here */
    }
    else { return false; }
}

function traverseElement(elem) {
    if (elementFound(elem) == true) {
        return elem;
    }
    else {
        for (var i = 0; i < elem.childNodes.length; i++) {
            var ret = traverseElement(elem.childNodes[i]);
            if (ret != null) { return ret; }
        }
    }
}

return traverseElement(document);
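For completeness, here is roughly how such a script would be invoked from the C# side; the script literal is abbreviated, and the cast assumes the script returns a DOM element (WebDriver marshals a returned node back as an IWebElement):

// Cast the driver to IJavaScriptExecutor and run the traversal script.
IJavaScriptExecutor js = (IJavaScriptExecutor)driver;
string script =
    "function elementFound(elem) { /* element identification logic */ }" +
    "function traverseElement(elem) { /* recursion shown above */ }" +
    "return traverseElement(document);";
IWebElement match = (IWebElement)js.ExecuteScript(script);
if (match != null)
{
    Console.WriteLine("Matched element: " + match.TagName);
}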

SDL_image's IMG_Load does not work

I am using IMG_Load() to load a png file, but it is simply not working.
loadedImage = IMG_Load(filename.c_str());
After this statement, loadedImage is still NULL, and no error is reported.
PS: I am using VS C++ 2008, and the png file is in the development folder. Here is my code (it is exactly like Lazy Foo's):
//The headers
#include "SDL.h"
#include "SDL_image.h"
#include <string>

#pragma comment( linker, "/subsystem:\"windows\" /entry:\"mainCRTStartup\"" )

//Screen attributes
const int SCREEN_WIDTH = 640;
const int SCREEN_HEIGHT = 480;
const int SCREEN_BPP = 32;

//The surfaces
SDL_Surface *image = NULL;
SDL_Surface *screen = NULL;

SDL_Surface *load_image( std::string filename )
{
    //The image that's loaded
    SDL_Surface* loadedImage = NULL;

    //The optimized image that will be used
    SDL_Surface* optimizedImage = NULL;

    //Load the image using SDL_image
    loadedImage = IMG_Load(filename.c_str());

    //If the image loaded
    if( loadedImage != NULL )
    {
        //Create an optimized image
        //cout<<"Flag";
        optimizedImage = SDL_DisplayFormat( loadedImage );

        //Free the old image
        SDL_FreeSurface( loadedImage );
    }

    //Return the optimized image
    return optimizedImage;
}

void apply_surface( int x, int y, SDL_Surface* source, SDL_Surface* destination )
{
    //Rectangle to hold the offsets
    SDL_Rect offset;

    //Get offsets
    offset.x = x;
    offset.y = y;

    //Blit the surface
    SDL_BlitSurface( source, NULL, destination, &offset );
}

bool init()
{
    //Initialize all SDL subsystems
    if( SDL_Init( SDL_INIT_EVERYTHING ) == -1 )
    {
        return false;
    }

    //Set up the screen
    screen = SDL_SetVideoMode( SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_BPP, SDL_SWSURFACE );

    //If there was an error in setting up the screen
    if( screen == NULL )
    {
        return false;
    }

    //Set the window caption
    SDL_WM_SetCaption( "PNG test", NULL );

    //If everything initialized fine
    return true;
}

void clean_up()
{
    //Free the surface
    SDL_FreeSurface( image );

    //Quit SDL
    SDL_Quit();
}

int main( int argc, char* args[] )
{
    //Initialize
    if( init() == false )
    {
        return 1;
    }

    //Load the image
    image = load_image( "look.png" );

    //If there was a problem in loading the image
    if( image == NULL )
    {
        return 5;
    }

    //Apply the surface to the screen
    apply_surface( 0, 0, image, screen );

    //Update the screen
    if( SDL_Flip( screen ) == -1 )
    {
        return 1;
    }

    //Wait 2 seconds
    SDL_Delay( 2000 );

    //Free the surface and quit SDL
    clean_up();

    return 0;
}
The program exits with return code 5, i.e. the image failed to load.
My bad.
I had only copied SDL_image.dll into the exe folder.
I should also have copied zlib1.dll and libpng12-0.dll.
Actually, all of the DLLs are needed, because if one of them is missing, the program won't give any error prompt, which is confusing.
If you download libpng from the website, you will find a file called libpng12.dll. Copying that file into system32 does not work, because SDL_image actually looks for a file called "libpng12-0.dll". I found that out after adding a call to IMG_GetError() to my code. The error message states it very clearly...
So what you do is copy libpng12.dll into system32 and rename it to libpng12-0.dll! And everything works just fine.
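For anyone hitting the same silent failure: IMG_Load reports the underlying cause through IMG_GetError(), so a check like the following sketch (dropped into the load_image function above; fprintf may need <cstdio> if your build doesn't already pull it in via SDL.h) surfaces missing-DLL problems immediately:

//Load the image using SDL_image
loadedImage = IMG_Load( filename.c_str() );

//If loading failed, print the reason (e.g. a missing libpng12-0.dll)
if( loadedImage == NULL )
{
    fprintf( stderr, "IMG_Load(\"%s\") failed: %s\n", filename.c_str(), IMG_GetError() );
}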