Is there any way to convert an NSView containing subviews into an NSImage? - objective-c

Is there any way to convert an NSView that contains many subviews with a background color into an NSImage?
Sample code would be great.
[_collageView lockFocus]; // lock focus so initWithFocusedViewRect: has a context to read from
NSBitmapImageRep *rep = [[NSBitmapImageRep alloc] initWithFocusedViewRect:[_collageView bounds]];
[_collageView unlockFocus];
// NSData *exportedData = [rep representationUsingType:NSJPEGFileType properties:nil];
NSData *exportedData = [rep representationUsingType:NSPNGFileType properties:nil];
NSSavePanel *savepanel = [NSSavePanel savePanel];
savepanel.title = #"Save chart";
[savepanel setAllowedFileTypes:[NSArray arrayWithObject:#"png"]];
[savepanel beginSheetModalForWindow:self.view.window completionHandler:^(NSInteger result)
{
if (NSFileHandlingPanelOKButton == result)
{
NSURL* fileURL = [savepanel URL];
if ([fileURL.pathExtension isEqualToString:@""])
fileURL = [fileURL URLByAppendingPathExtension:@"png"];
[exportedData writeToURL:fileURL atomically:YES];
}
}];
I also used the following code:
[_collageView lockFocus];
NSBitmapImageRep *bits = [[NSBitmapImageRep alloc]initWithFocusedViewRect: [_collageView bounds]];
[_collageView unlockFocus];
NSImage *image1 = [[NSImage alloc] init];
[image1 addRepresentation: bits];
[self.ImageView setImage:image1];
but it is not working.

Your approach copies the bitmap from the window server. This is the rendered content of the view. Typically you do not want to do that.
A better way is to lock focus on an NSImage, which sets up the graphics context, and then draw into that image using -drawRect: or whatever drawing methods you have. You can include the subviews by simply iterating over them.
// perhaps in a category of NSView
- (void)drawRecursively
{
[self drawRect:self.bounds];
for (NSView *subview in self.subviews)
{
[subview drawRecursively];
}
}
To start the recursion:
NSImage *image = [[NSImage alloc] initWithSize:theView.bounds.size];
[image lockFocus]; // Set-up the context for the image
[theView drawRecursively];
[image unlockFocus];
Typed in Safari.
Edit: Before descending into the subviews, you have to apply a coordinate transformation for each of them. I forgot that.
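For illustration, here is how that transform could be folded into the category method. This is only a sketch, not tested code from the answer; it deliberately ignores flipped coordinate systems and clipping, which the longer answer below has to deal with:

- (void)drawRecursively
{
    [self drawRect:self.bounds];
    for (NSView *subview in self.subviews)
    {
        // move the origin to the subview's frame before it draws itself
        NSAffineTransform *transform = [NSAffineTransform transform];
        [transform translateXBy:subview.frame.origin.x yBy:subview.frame.origin.y];
        [transform concat];
        [subview drawRecursively];
        // undo the translation so the next sibling draws from the right origin
        [transform invert];
        [transform concat];
    }
}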

I have actually implemented the drawing of a view hierarchy offscreen myself.
It is non-trivial, even for a very simple view hierarchy where all views are
subclasses of NSControl and implement -(BOOL)isFlipped { return YES; }
Here's one small portion of that code, which will give you an idea of the pain of doing this:
- (void)drawRect:(NSRect)iDirtyBigRect
{
#ifdef DISABLE_PSEUDOVIEW
debugRectsBeingDrawn();
return;
#endif
if ( !drawableShape )
{
if ( [[[self window] contentView] wantsLayer] )
{
CGRect cgRect = NSRectToCGRect([self bounds]);
drawableShape = HIShapeCreateMutableWithRect( &cgRect );
}
else
{
DUMP( #"our superview should have set up our drawableShape!" );
[self setNeedsDisplay:YES];
return;
}
}
else if ( iDirtyBigRect.size.width <= 0 )
{
// Our drawable shape is empty.
// This can happen for several valid reasons:
// 1) dirtyShape is fully masked by opaqueShape
// 2) for some reason, we were called to obey needsDisplay when not needed
DLog( #"nothing to draw %#", self );
return; // must return, otherwise we risk drawing outside our bounds
}
//NSArray *hardSubviews = [self subviews];
for ( NSView *pseudoSubview in pseudoSubviews )
{
if ( ![pseudoSubview isHidden] &&
[pseudoSubview window] == offscreenWindow ) // only draw pseudo view if it's in offscreen window
{
NSRect rectToDraw = [pseudoSubview frame];
// if ( rectToDraw.size.width == 0 )
// {
// DLog( #"clipped to superview %#", pseudoSubview );
// }
CGRect cgRect = NSRectToCGRect(rectToDraw);
if ( HIShapeIntersectsRect(drawableShape, &cgRect) )
{
// the magic: transform to subview coordinates
NSAffineTransform* xform = [NSAffineTransform transform];
[xform translateXBy:rectToDraw.origin.x yBy:rectToDraw.origin.y];
if ( [pseudoSubview isFlipped] != [self isFlipped] )
{
if ( ![pseudoSubview isKindOfClass: [NSColorWell class]] )
{
DUMP( #"hmmm flippedness different %d %d", [pseudoSubview isFlipped], [self isFlipped] );
}
[xform translateXBy:0.0 yBy:rectToDraw.size.height];
[xform scaleXBy:1.0 yBy:-1.0];
}
[xform concat];
HIMutableShapeRef newShape = HIShapeCreateMutableWithRect( &cgRect );
HIShapeIntersect( newShape, drawableShape, newShape ); // clip to subview frame
HIShapeOffset( newShape, -cgRect.origin.x, -cgRect.origin.y ); // translate to subview coords
CGRect dirtyRect;
HIShapeGetBounds(newShape, &dirtyRect);
// if ( dirtyRect.size.width <= 0 )
// {
// DUMP( #"why?" );
// }
if ( [pseudoSubview isKindOfClass:[PseudoView class]] )
{
//DLog( #"drawing Pseudo %# dirtyRect %# ", pseudoSubview, NSStringFromRect(NSRectFromCGRect(dirtyRect)));
PseudoView *pView = (PseudoView *)pseudoSubview;
if ( pView->drawableShape )
{
CFRelease( pView->drawableShape );
pView->drawableShape = NULL;
}
// create subview personal drawableShape from my drawable shape
pView->drawableShape = newShape;
if ( [pseudoSubview isOpaque] )
gDrawDirtyPixels += dirtyRect.size.width * dirtyRect.size.height;
if ( dirtyRect.size.width > 0 )
[pseudoSubview drawRect: NSRectFromCGRect(dirtyRect)];
}
else
{
//DLog( #"drawing non-Pseudo %# rectToDraw %# ", pseudoSubview, NSStringFromRect( rectToDraw ));
UInt64 t1 = CpuCycles();
CFRelease( newShape );
// sacrifice efficiency to avoid bugs...
// always draw the entire view.
[pseudoSubview drawRect:[pseudoSubview bounds]]; // NSRectFromCGRect(dirtyRect)]; // [pseudoSubview bounds]];
UnitTestQuartz( [pseudoSubview bounds] );
// NSLog( #"drawRect %# %# sUnitTestQuartzToggle %f\n -- superv %#\n -- self %#", pseudoSubview, NSStringFromRect(rectToDraw), sUnitTestQuartzToggle*100, [pseudoSubview superview], self );
UInt64 t2 = CpuCycles();
int diff = (int)(t2-t1);
gDrawControl += diff;
}
[xform invert]; // restore
[xform concat];
}
}
}
//if ( [self isKindOfClass:[PseudoRootView class]] )
// [offscreenWindow setViewsNeedDisplay:NO];
}

I was able to solve my problem with the following code:
// Setup the image to render
NSRect imgRect = _collageView.frame;
NSSize imgSize = imgRect.size;
NSBitmapImageRep *rep = [[NSBitmapImageRep alloc] initWithBitmapDataPlanes:NULL
pixelsWide:imgSize.width
pixelsHigh:imgSize.height
bitsPerSample:8
samplesPerPixel:4
hasAlpha:YES
isPlanar:NO
colorSpaceName:NSDeviceRGBColorSpace
bitmapFormat:NSAlphaFirstBitmapFormat
bytesPerRow:0
bitsPerPixel:0];
NSGraphicsContext *context = [NSGraphicsContext graphicsContextWithBitmapImageRep:rep];
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:context];
// Render
CGContextRef zCgContextRef = (CGContextRef) [context graphicsPort];
[[_collageView layer] renderInContext:zCgContextRef];
[NSGraphicsContext restoreGraphicsState]; // balance the saveGraphicsState above
// Get the Data for the image
NSData *exportedData = [rep representationUsingType:NSJPEGFileType properties:nil];
// Start the savepanel
NSSavePanel *savepanel = [NSSavePanel savePanel];
savepanel.title = #"Save chart";
[savepanel setAllowedFileTypes:[NSArray arrayWithObject:#"jpg"]];
[savepanel beginSheetModalForWindow:self.view.window completionHandler:^(NSInteger result)
{
if (NSFileHandlingPanelOKButton == result)
{
NSURL* fileURL = [savepanel URL];
if ([fileURL.pathExtension isEqualToString:@""])
fileURL = [fileURL URLByAppendingPathExtension:@"jpg"];
[exportedData writeToURL:fileURL atomically:YES];
}
}];
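As an aside, AppKit also provides a built-in way to capture a view hierarchy without any lock-focus dance: NSView's -bitmapImageRepForCachingDisplayInRect: and -cacheDisplayInRect:toBitmapImageRep:. A minimal sketch of that route (not the code used above, just an alternative):

// sketch: capture the whole view hierarchy via NSView's caching API
NSRect bounds = [_collageView bounds];
NSBitmapImageRep *cachedRep = [_collageView bitmapImageRepForCachingDisplayInRect:bounds];
[_collageView cacheDisplayInRect:bounds toBitmapImageRep:cachedRep];
NSData *pngData = [cachedRep representationUsingType:NSPNGFileType properties:nil];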

Related

Resize cropped image

I have a similar problem to the one in this post:
How to remove the transparent area of an UIImageView after masking?
but the solution doesn't help me. In my case I am cropping an image into multiple parts, and every part keeps the bounds of the parent image. This is the cropping function:
- (UIImage*) maskImage:(CALayer *) sourceLayer toAreaInsidePath:(UIBezierPath*) maskPath;
{
return [self compositeImage:sourceLayer onPath:maskPath usingBlendMode:kCGBlendModeSourceIn];
}
- (UIImage*) compositeImage:(CALayer *) layer onPath:(UIBezierPath*) path usingBlendMode:(CGBlendMode) blend;
{
UIGraphicsBeginImageContext([layer frame].size);
[layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *sourceImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
UIGraphicsBeginImageContext([layer frame].size);
[path fill];
[sourceImage drawInRect:[layer frame] blendMode:blend alpha:1.0];
UIImage *maskedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return maskedImage;
}
and this is how I use it:
- (PuzzlePart *)calculate:(NSDictionary *) item
{
UIBezierPath *path = [self calculatePath:[item objectForKey:@"figure"]];
UIImageView *iView = [[UIImageView alloc] initWithImage:image];
//iView.contentMode = UIViewContentModeCenter;
UIImage *imagePart = [self maskImage:iView.layer toAreaInsidePath:path];
iView.image = imagePart;
return [[PuzzlePart alloc] initWithParams:imagePart lhckID:lifehackID];
}
- (UIBezierPath *)calculatePath:(NSArray *) points {
UIBezierPath *path = [UIBezierPath bezierPath];
NSDictionary *point = [points objectAtIndex:0];
[path moveToPoint:CGPointMake([point[@"x"] intValue], [point[@"y"] intValue])];
for (int i = 1; i < [points count]; i++) {
point = [points objectAtIndex:i];
[path addLineToPoint:CGPointMake([point[@"x"] intValue], [point[@"y"] intValue])];
}
[path closePath];
return path;
}
and this is how I place it:
- (void)placeItems
{
for (PuzzlePart *part in puzzleParts) {
UIImage *imagePart = [part imagePart];
CGRect newRect = [self cropRectForImage:imagePart];
UIImage *image = [self imageWithImage:imagePart convertToSize:newRect.size];
UIImageView *imageView = [[UIImageView alloc] initWithImage:image];
//imageView.contentMode = UIViewContentModeCenter;
imageView.center = CGPointMake([self getRandomNumberBetween:0 to:self.view.frame.size.width],
[self getRandomNumberBetween:0 to:self.view.frame.size.height]);
[self.view addSubview:imageView];
while ([self viewIntersectsWithAnotherView:self.view chosenView:imageView]) {
viewPositionMovingStep++;
[self placeItem:self.view:imageView];
}
}
[self startCountdown];
}
[self cropRectForImage:imagePart] is the same method from the post I linked above,
and this is what I have afterwards:
Any ideas? Thank you!

Exporting unshown NSViewController to JPG

I am working on a desktop app that generates an image from an NSViewController that is not shown.
The generated view has 3 NSImageViews whose content changes after creation.
I manage to export the NSViewController's view to a JPG file, but it never renders the image given to the NSImageView; it keeps the image set in the nib. Also, if I don't set the background color on the view's layer, I get an empty result.
What am I missing here? I guess I need to update the NSViewController's view... but how can I do that? I do not want to show my NSViewController.
My code:
- (IBAction)doExport:(id)sender {
ExportView *view = [[ExportView alloc] initWithNibName:@"ExportView" bundle:nil];
[view.pic1 setImage:self.pic1.image];
[view.pic2 setImage:self.pic2.image];
[view.pic3 setImage:self.pic3.image];
[view.icon setImage:self.icon.image];
// Hardcoding for test
[view.pic1 setImage:[NSImage imageNamed:@"iPhone 4-Inch Screenshot 1"]];
[view.pic1 setNeedsLayout:YES];
[view.pic1 setNeedsDisplay:YES];
[view.pic1 updateLayer];
// Setup the image to render
NSRect imgRect = view.view.frame;
NSSize imgSize = imgRect.size;
NSBitmapImageRep *rep = [[NSBitmapImageRep alloc] initWithBitmapDataPlanes:NULL
pixelsWide:imgSize.width
pixelsHigh:imgSize.height
bitsPerSample:8
samplesPerPixel:4
hasAlpha:YES
isPlanar:NO
colorSpaceName:NSDeviceRGBColorSpace
bitmapFormat:NSAlphaFirstBitmapFormat
bytesPerRow:0
bitsPerPixel:0];
NSGraphicsContext *g = [NSGraphicsContext graphicsContextWithBitmapImageRep:rep];
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:g];
// Set view background color
CALayer *viewLayer = [CALayer layer];
[viewLayer setBackgroundColor:[NSColor clearColor].CGColor];
[view.view setWantsLayer:YES];
[view.view setLayer:viewLayer];
// Render
CGContextRef zCgContextRef = (CGContextRef) [g graphicsPort];
[[view.view layer] renderInContext:zCgContextRef];
// Render pic as well, for test
[view.pic1.layer renderInContext:zCgContextRef];
// Get the Data for the image
NSData *exportedData = [rep representationUsingType:NSJPEGFileType properties:nil];
// Start the savepanel
NSSavePanel *savepanel = [NSSavePanel savePanel];
savepanel.title = #"Save chart";
[savepanel setAllowedFileTypes:[NSArray arrayWithObject:#"jpg"]];
[savepanel beginSheetModalForWindow:self.window completionHandler:^(NSInteger result)
{
if (NSFileHandlingPanelOKButton == result)
{
NSURL* fileURL = [savepanel URL];
if ([fileURL.pathExtension isEqualToString:@""])
fileURL = [fileURL URLByAppendingPathExtension:@"jpg"];
[exportedData writeToURL:fileURL atomically:YES];
}
}];
}
I solved it in a very ugly way. Basically, I add the view to my contentView and call setHidden:YES straight away. This initializes the view. Then I change the content of my nib view.
When it's time to render the data to NSData, I set setHidden to NO, generate the NSData, and then remove the view.
The end result (not beautiful code :( ):
- (IBAction)doExport:(id)sender {
ExportView *view = [[ExportView alloc] initWithNibName:@"ExportView" bundle:nil];
[view.view setHidden:YES];
[self.window.contentView addSubview:view.view];
[view.pic1 setImage:self.pic1.image];
[view.pic2 setImage:self.pic2.image];
[view.pic3 setImage:self.pic3.image];
[view.icon setImage:self.icon.image];
// Setup the image to render
NSRect imgRect = view.view.frame;
NSSize imgSize = imgRect.size;
NSBitmapImageRep *rep = [[NSBitmapImageRep alloc] initWithBitmapDataPlanes:NULL
pixelsWide:imgSize.width
pixelsHigh:imgSize.height
bitsPerSample:8
samplesPerPixel:4
hasAlpha:YES
isPlanar:NO
colorSpaceName:NSDeviceRGBColorSpace
bitmapFormat:NSAlphaFirstBitmapFormat
bytesPerRow:0
bitsPerPixel:0];
NSGraphicsContext *g = [NSGraphicsContext graphicsContextWithBitmapImageRep:rep];
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:g];
// Set view background color
CALayer *viewLayer = [CALayer layer];
[viewLayer setBackgroundColor:[NSColor clearColor].CGColor];
[view.view setWantsLayer:YES];
[view.view setLayer:viewLayer];
[view.view setHidden:NO];
// Render
CGContextRef zCgContextRef = (CGContextRef) [g graphicsPort];
[[view.view layer] renderInContext:zCgContextRef];
[NSGraphicsContext restoreGraphicsState]; // balance the saveGraphicsState above
[view.view removeFromSuperview];
// Get the Data for the image
NSData *exportedData = [rep representationUsingType:NSJPEGFileType properties:nil];
// Start the savepanel
NSSavePanel *savepanel = [NSSavePanel savePanel];
savepanel.title = #"Save chart";
[savepanel setAllowedFileTypes:[NSArray arrayWithObject:#"jpg"]];
[savepanel setNameFieldStringValue:#"Test"];
[savepanel beginSheetModalForWindow:self.window completionHandler:^(NSInteger result)
{
if (NSFileHandlingPanelOKButton == result)
{
NSURL* fileURL = [savepanel URL];
if ([fileURL.pathExtension isEqualToString:@""])
fileURL = [fileURL URLByAppendingPathExtension:@"jpg"];
[exportedData writeToURL:fileURL atomically:YES];
}
}];
}

How to control memory usage when applying CIFilters?

When I apply CIFilters to images, the memory usage keeps growing and I don't know what to do.
I've tried everything I could think of:
using @autoreleasepool:
- (UIImage *)applySepiaToneTo:(UIImage *)img //Sepia
{
@autoreleasepool
{
CIImage *ciimageToFilter = [CIImage imageWithCGImage:img.CGImage];
CIFilter *sepia = [CIFilter filterWithName:@"CISepiaTone"
keysAndValues: kCIInputImageKey, ciimageToFilter,
@"inputIntensity", @1.0, nil];
return [self retrieveFilteredImageWithFilter:sepia];
}
}
- (UIImage *)retrieveFilteredImageWithFilter:(CIFilter *)filtro
{
@autoreleasepool
{
CIImage *ciimageFiltered = [filtro outputImage];
CGImageRef cgimg = [_context createCGImage:ciimageFiltered
fromRect:[ciimageFiltered extent]];
UIImage *filteredImage = [UIImage imageWithCGImage:cgimg];
CGImageRelease(cgimg);
return filteredImage;
}
}
I'm also downsizing the image to be filtered and doing the filtering in a background thread:
- (void)filterWasSelected:(NSNotification *)notification
{
self.darkeningView.alpha = 0.5;
self.darkeningView.userInteractionEnabled = YES;
[self.view bringSubviewToFront:self.darkeningView];
[self.activityIndic startAnimating];
[self.view bringSubviewToFront:self.activityIndic];
int indice = [notification.object intValue];
__block NSArray *returnObj;
__block UIImage *auxUiimage;
if(choosenImage.size.width == 1280 || choosenImage.size.height == 1280)
{
UIImageView *iv;
if(choosenImage.size.width >= choosenImage.size.height)
{
float altura = (320 * choosenImage.size.height)/choosenImage.size.width;
iv = [[UIImageView alloc] initWithFrame:CGRectMake(0,0,320,altura)];
iv.image = choosenImage;
}
else
{
float largura = (choosenImage.size.width * 320)/choosenImage.size.height;
iv = [[UIImageView alloc] initWithFrame:CGRectMake(0,0,largura,320)];
iv.image = choosenImage;
}
UIGraphicsBeginImageContextWithOptions(iv.bounds.size, YES, 0.0);
[iv.layer renderInContext:UIGraphicsGetCurrentContext()];
auxUiimage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
}
else
auxUiimage = choosenImage;
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0), ^{
if(artisticCollection)
returnObj = [self.filterCoordinator setupFilterArtisticType:indice toImage:auxUiimage];
else
returnObj = [self.filterCoordinator setupFilterOldOrVintageType:indice toImage:auxUiimage];
dispatch_async(dispatch_get_main_queue(), ^{
self.darkeningView.alpha = 0.3;
self.darkeningView.userInteractionEnabled = NO;
[self.activityIndic stopAnimating];
[self.view bringSubviewToFront:stageBackground];
[self.view bringSubviewToFront:stage];
[self.view bringSubviewToFront:self.filtersContainerView];
[self.view bringSubviewToFront:self.framesContainerView];
[self.view bringSubviewToFront:self.colorsContainerView];
if(returnObj)
{
auxUiimage = [returnObj firstObject];
NSLog(#"filtered image width = %f and height = %f", auxUiimage.size.width, auxUiimage.size.height);
returnObj = nil;
choosenImageContainer.image = auxUiimage;
}
});
});
}
I've also tried creating the context with the contextWithEAGLContext: method; nothing changed.
I've researched a lot, including on Stack Overflow, and found nothing.
Until I place the image in the image view (the image comes from the photo album), I'm only using 23 MB of memory; when I apply a filter, usage jumps to 51 MB and does not come down. If I continue to apply other filters, the memory usage only grows.
There's no leaking in my app; I've checked in Instruments.
The bringSubviewToFront methods are not responsible either; I've checked.
The growth happens in the creation of the CIImage followed by the creation of the CIFilter object.
I know that applying the filter loads data into memory, but how do I clean up that memory afterwards?
Is there some secret that I'm not aware of? Please help.
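One pattern that is often suggested for this kind of growth, offered here only as a hedged sketch and not as a confirmed fix for this case, is to create the CIContext exactly once and reuse it for every filter, since each context keeps its own caches:

// hypothetical helper: one shared CIContext reused across all filters,
// so per-context caches are not rebuilt for every image
- (CIContext *)sharedFilterContext
{
    static CIContext *sharedContext = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        sharedContext = [CIContext contextWithOptions:nil];
    });
    return sharedContext;
}

The code above already keeps a context in _context; the sketch only makes the create-once-and-reuse intent explicit.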

How can I save the image from IKImageView?

Okay... I want to save only the visible rectangle of the IKImageView's image.
My problem is that an image in portrait mode somehow does not get saved in the right orientation. I draw the image with this code:
[sourceImg drawInRect:targetRect fromRect:sourceRect operation:NSCompositeSourceOver fraction:1.0f];
On the other hand, if I draw the image with this code:
[sourceImg drawAtPoint:NSZeroPoint fromRect:sourceRect operation:NSCompositeSourceOver fraction:1.0f];
it will be in the right orientation, but edits like zooming are lost. I mean, it saves the right cut-out, but unfortunately not at the right size.
Here is the code how I load the image:
- (IBAction)imageSelectionButtonAction:(id)sender {
NSLog(#"%s", __FUNCTION__);
NSOpenPanel *panel = [NSOpenPanel openPanel];
id model = [self getModel];
if (imageView) {
[panel setAllowedFileTypes:[NSImage imageFileTypes]];
[panel beginSheetModalForWindow:[[NSApplication sharedApplication] mainWindow] completionHandler:^(NSInteger returnCode) {
if (returnCode == 1) {
NSURL *imageUrl = [panel URL];
CGImageRef image = NULL;
CGImageSourceRef isr = CGImageSourceCreateWithURL( (__bridge CFURLRef)imageUrl, NULL);
if (isr) {
NSDictionary *options = [NSDictionary dictionaryWithObject: (id)kCFBooleanTrue forKey: (id) kCGImageSourceShouldCache];
image = CGImageSourceCreateImageAtIndex(isr, 0, (__bridge CFDictionaryRef)options);
if (image) {
_imageProperties = (__bridge_transfer NSDictionary*)CGImageSourceCopyPropertiesAtIndex(isr, 0, (__bridge CFDictionaryRef)_imageProperties);
_imageUTType = (__bridge NSString*)CGImageSourceGetType(isr);
}
CFRelease(isr);
}
if (image) {
[imageView setImage:image imageProperties:_imageProperties];
CGImageRelease(image);
}
[[model saveTempMutabDict] setValue:[imageUrl absoluteString] forKey:@"tempImage"];
}
}];
return;
}
}
And here is the code how I save it:
- (void)saveImage:(NSString *)path {
// get the current image from the image view
CGImageRef sourceImgRef = [imageView image];
NSRect targetRect = [imageView visibleRect];
NSImage *sourceImg = [[NSImage alloc] initWithCGImage:sourceImgRef size:NSZeroSize];
NSMutableDictionary *thisTiffDict = [_imageProperties valueForKey:@"{TIFF}"];
NSInteger theOrientation = [[thisTiffDict valueForKey:@"Orientation"] integerValue];
NSImage *targetImg = nil;
if (theOrientation == 6) {
targetImg = [[NSImage alloc] initWithSize:NSMakeSize([imageView frame].size.height, [imageView frame].size.width)];
} else {
targetImg = [[NSImage alloc] initWithSize:NSMakeSize([imageView frame].size.width, [imageView frame].size.height)];
}
NSRect sourceRect = [imageView convertViewRectToImageRect:targetRect];
[targetImg lockFocus];
[[NSGraphicsContext currentContext] setImageInterpolation:NSImageInterpolationHigh];
[sourceImg drawAtPoint:NSZeroPoint fromRect:sourceRect operation:NSCompositeSourceOver fraction:1.0f];
[targetImg unlockFocus];
_saveOptions = [[IKSaveOptions alloc] initWithImageProperties:_imageProperties imageUTType: _imageUTType];
NSString * newUTType = [_saveOptions imageUTType];
CGImageRef targetImgRef = [targetImg CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil];
if (targetImgRef) {
NSURL *url = [NSURL fileURLWithPath: path];
CGImageDestinationRef dest = CGImageDestinationCreateWithURL((__bridge CFURLRef)url, (__bridge CFStringRef)newUTType, 1, NULL);
if (dest) {
CGImageDestinationAddImage(dest, targetImgRef, (__bridge CFDictionaryRef)[_saveOptions imageProperties]);
CGImageDestinationFinalize(dest);
CFRelease(dest);
}
}
}
I have no idea what I'm doing wrong.
Thank you,
Jens

Display NSImage on a CALayer

I've been trying to display a NSImage on a CALayer. Then I realised I need to convert it to a CGImage apparently, then display it...
I have this code which doesn't seem to be working
CALayer *layer = [CALayer layer];
NSImage *finderIcon = [[NSWorkspace sharedWorkspace] iconForFileType:NSFileTypeForHFSTypeCode(kFinderIcon)];
[finderIcon setSize:(NSSize){ 128.0f, 128.0f }];
CGImageSourceRef source;
source = CGImageSourceCreateWithData((CFDataRef)finderIcon, NULL);
CGImageRef finalIcon = CGImageSourceCreateImageAtIndex(source, 0, NULL);
layer.bounds = CGRectMake(128.0f, 128.0f, 4, 4);
layer.position = CGPointMake(128.0f, 128.0f);
layer.contents = finalIcon;
// Insert the layer into the root layer
[mainLayer addSublayer:layer];
Why? How can I get this to work?
From the comments: Actually, if you're on 10.6, you can also just set the CALayer's contents to an NSImage rather than a CGImageRef...
If you're on OS X 10.6 or later, take a look at NSImage's CGImageForProposedRect:context:hints: method.
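A minimal sketch of both options, reusing the layer and finderIcon variables from the question (assumed to be in scope):

// On 10.6+, CALayer accepts an NSImage directly:
layer.contents = finderIcon;
// Or convert explicitly; a NULL rect and nil context let AppKit pick a suitable representation:
CGImageRef cgIcon = [finderIcon CGImageForProposedRect:NULL context:nil hints:nil];
layer.contents = (id)cgIcon;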
If you're not on 10.6, I've got this in a category on NSImage:
-(CGImageRef)CGImage
{
CGContextRef bitmapCtx = CGBitmapContextCreate(NULL/*data - pass NULL to let CG allocate the memory*/,
[self size].width,
[self size].height,
8 /*bitsPerComponent*/,
0 /*bytesPerRow - CG will calculate it for you if it's allocating the data. This might get padded out a bit for better alignment*/,
[[NSColorSpace genericRGBColorSpace] CGColorSpace],
kCGBitmapByteOrder32Host|kCGImageAlphaPremultipliedFirst);
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:[NSGraphicsContext graphicsContextWithGraphicsPort:bitmapCtx flipped:NO]];
[self drawInRect:NSMakeRect(0,0, [self size].width, [self size].height) fromRect:NSZeroRect operation:NSCompositeCopy fraction:1.0];
[NSGraphicsContext restoreGraphicsState];
CGImageRef cgImage = CGBitmapContextCreateImage(bitmapCtx);
CGContextRelease(bitmapCtx);
return (CGImageRef)[(id)cgImage autorelease];
}
I think I wrote this myself. But it's entirely possible that I ripped it off from somewhere else like Stack Overflow. It's an older personal project and I don't really remember.
Here's some code which may help you. I sure hope the formatting of this does not get all messed up like it appears it's going to; all I can offer is that this works for me.
// -------------------------------------------------------------------------------------
- (void)awakeFromNib
{
// setup our main window 'contentWindow' to use layers
[[contentWindow contentView] setWantsLayer:YES]; // NSWindow*
// create a root layer to contain all of our layers
CALayer *root = [[contentWindow contentView] layer];
// use constraint layout to allow sublayers to center themselves
root.layoutManager = [CAConstraintLayoutManager layoutManager];
// create a new layer which will contain ALL our sublayers
// -------------------------------------------------------
mContainer = [CALayer layer];
mContainer.bounds = root.bounds;
mContainer.frame = root.frame;
mContainer.position = CGPointMake(root.bounds.size.width * 0.5,
root.bounds.size.height * 0.5);
// insert layer on the bottom of the stack so it is behind the controls
[root insertSublayer:mContainer atIndex:0];
// make it resize when its superlayer does
root.autoresizingMask = kCALayerWidthSizable | kCALayerHeightSizable;
// make it resize when its superlayer does
mContainer.autoresizingMask = kCALayerWidthSizable | kCALayerHeightSizable;
}
// -------------------------------------------------------------------------------------
- (void) loadMyImage:(NSString*) path
n:(NSInteger) num
x:(NSInteger) xpos
y:(NSInteger) ypos
h:(NSInteger) hgt
w:(NSInteger) wid
b:(NSString*) blendstr
{
#ifdef __DEBUG_LOGGING__
NSLog(#"loadMyImage - ENTER [%#] num[%d] x[%d] y[%d] h[%d] w[%d] b[%#]",
path, num, xpos, ypos, hgt, wid, blendstr);
#endif
NSInteger xoffset = ((wid / 2) + xpos); // use CORNER versus CENTER for location
NSInteger yoffset = ((hgt / 2) + ypos);
CIFilter* filter = nil;
CGRect cgrect = CGRectMake((CGFloat) xoffset, (CGFloat) yoffset,
(CGFloat) wid, (CGFloat) hgt);
if(nil != blendstr) // would be equivalent to @"CIMultiplyBlendMode" or similar
{
filter = [CIFilter filterWithName:blendstr];
}
// read image file via supplied path
NSImage* theimage = [[NSImage alloc] initWithContentsOfFile:path];
if(nil != theimage)
{
[self setMyImageLayer:[CALayer layer]]; // create layer
myImageLayer.frame = cgrect; // locate & size image
myImageLayer.compositingFilter = filter; // nil is OK if no filter
[myImageLayer setContents:(id) theimage]; // deposit image into layer
// add new layer into our main layer [see awakeFromNib above]
[mContainer insertSublayer:myImageLayer atIndex:0];
[theimage release];
}
else
{
NSLog(#"ERROR loadMyImage - no such image [%#]", path);
}
}
+ (CGImageRef) getCachedImage:(NSString *) imageName
{
NSGraphicsContext *context = [NSGraphicsContext currentContext]; // note: the context itself, not its graphicsPort (a CGContextRef)
NSImage *img = [NSImage imageNamed:imageName];
NSRect rect = NSMakeRect(0, 0, [img size].width, [img size].height);
return [img CGImageForProposedRect:&rect context:context hints:NULL];
}
+ (CGImageRef) getImage:(NSString *) imageName withExtension:(NSString *) extension
{
NSGraphicsContext *context = [NSGraphicsContext currentContext]; // same fix as above
NSString* imagePath = [[NSBundle mainBundle] pathForResource:imageName ofType:extension];
NSImage* img = [[NSImage alloc] initWithContentsOfFile:imagePath];
NSRect rect = NSMakeRect(0, 0, [img size].width, [img size].height);
CGImageRef imgRef = [img CGImageForProposedRect:&rect context:context hints:NULL];
[img release];
return imgRef;
}
then you can set it:
yourLayer.contents = (id)[self getCachedImage:@"myImage.png"];
or
yourLayer.contents = (id)[self getImage:@"myImage" withExtension:@"png"];