Huge memory peak - CGContextDrawImage - objective-c

I use this piece of code to scale and rotate images taken with the photo camera. When I use this I can see a huge memory peak. Something like 20 MB. When I use the instruments I can see that this line:
CGContextDrawImage(ctxt, orig, self.CGImage);
holds 20 MB. Is this normal for full-resolution photos? The iPhone 4S can handle it, but older devices crash because of this code.
After I rescale the image I need it in NSData, so I use the UIImageJPEGRepresentation() method. This together makes the memory peak even higher. It goes to 70 MB in memory usage for several seconds.
And yes, I did read almost all the iOS camera related question about memory usage. But no answer out there.
// WBImage.mm -- extra UIImage methods
// by allen brunson march 29 2009
#include "WBImage.h"
/// Converts an angle expressed in degrees to radians.
static inline CGFloat degreesToRadians(CGFloat degrees)
{
    CGFloat fractionOfHalfTurn = degrees / 180.0;
    return M_PI * fractionOfHalfTurn;
}
/// Returns the given size with its width and height exchanged
/// (used when rotating an image by 90 degrees).
static inline CGSize swapWidthAndHeight(CGSize size)
{
    return CGSizeMake(size.height, size.width);
}
// WBImage -- extra UIImage methods: 90-degree rotation and proportional
// scaling helpers.
// NOTE(review): '@implementation'/'@end' were mangled to
// '#implementation'/'#end' in the original paste; restored so this compiles.
@implementation UIImage (WBImage)

// rotate an image to any 90-degree orientation, with or without mirroring.
// original code by kevin lohman, heavily modified by yours truly.
// http://blog.logichigh.com/2008/06/05/uiimage-fix/
//
// Returns a new image whose pixels are physically rearranged so the result
// is UIImageOrientationUp, or the receiver itself when no work is needed.
// The CGContextDrawImage call below forces a full decode of the source
// (roughly width * height * 4 bytes), which is the transient memory spike
// the surrounding question is about.
-(UIImage*)rotate:(UIImageOrientation)orient
{
    CGRect bnds = CGRectZero;                 // bounds of the rotated result
    UIImage* copy = nil;
    CGContextRef ctxt = nil;
    CGRect rect = CGRectZero;                 // bounds of the source image
    CGAffineTransform tran = CGAffineTransformIdentity;

    bnds.size = self.size;
    rect.size = self.size;

    switch (orient)
    {
        case UIImageOrientationUp:
            // already upright: nothing to do
            return self;

        case UIImageOrientationUpMirrored:
            tran = CGAffineTransformMakeTranslation(rect.size.width, 0.0);
            tran = CGAffineTransformScale(tran, -1.0, 1.0);
            break;

        case UIImageOrientationDown:
            tran = CGAffineTransformMakeTranslation(rect.size.width,
                                                    rect.size.height);
            tran = CGAffineTransformRotate(tran, degreesToRadians(180.0));
            break;

        case UIImageOrientationDownMirrored:
            tran = CGAffineTransformMakeTranslation(0.0, rect.size.height);
            tran = CGAffineTransformScale(tran, 1.0, -1.0);
            break;

        case UIImageOrientationLeft:
            // 90-degree turns swap the destination bounds
            bnds.size = swapWidthAndHeight(bnds.size);
            tran = CGAffineTransformMakeTranslation(0.0, rect.size.width);
            tran = CGAffineTransformRotate(tran, degreesToRadians(-90.0));
            break;

        case UIImageOrientationLeftMirrored:
            bnds.size = swapWidthAndHeight(bnds.size);
            tran = CGAffineTransformMakeTranslation(rect.size.height,
                                                    rect.size.width);
            tran = CGAffineTransformScale(tran, -1.0, 1.0);
            tran = CGAffineTransformRotate(tran, degreesToRadians(-90.0));
            break;

        case UIImageOrientationRight:
            bnds.size = swapWidthAndHeight(bnds.size);
            tran = CGAffineTransformMakeTranslation(rect.size.height, 0.0);
            tran = CGAffineTransformRotate(tran, degreesToRadians(90.0));
            break;

        case UIImageOrientationRightMirrored:
            bnds.size = swapWidthAndHeight(bnds.size);
            tran = CGAffineTransformMakeScale(-1.0, 1.0);
            tran = CGAffineTransformRotate(tran, degreesToRadians(90.0));
            break;

        default:
            // orientation value supplied is invalid
            assert(false);
            return nil;
    }

    UIGraphicsBeginImageContext(rect.size);
    ctxt = UIGraphicsGetCurrentContext();

    // Flip the CoreGraphics coordinate system to match UIKit before
    // applying the orientation transform.
    switch (orient)
    {
        case UIImageOrientationLeft:
        case UIImageOrientationLeftMirrored:
        case UIImageOrientationRight:
        case UIImageOrientationRightMirrored:
            CGContextScaleCTM(ctxt, -1.0, 1.0);
            CGContextTranslateCTM(ctxt, -rect.size.height, 0.0);
            break;

        default:
            CGContextScaleCTM(ctxt, 1.0, -1.0);
            CGContextTranslateCTM(ctxt, 0.0, -rect.size.height);
            break;
    }

    CGContextConcatCTM(ctxt, tran);
    CGContextDrawImage(ctxt, bnds, self.CGImage);   // full decode happens here

    copy = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return copy;
}

// Convenience for camera output: bake in the EXIF orientation, then scale
// down so that neither dimension exceeds maxSize.
-(UIImage*)rotateAndScaleFromCameraWithMaxSize:(CGFloat)maxSize
{
    UIImage* imag = self;
    imag = [imag rotate:imag.imageOrientation];
    imag = [imag scaleWithMaxSize:maxSize];
    return imag;
}

// Scale using high-quality interpolation (see the quality: variant below).
-(UIImage*)scaleWithMaxSize:(CGFloat)maxSize
{
    return [self scaleWithMaxSize:maxSize quality:kCGInterpolationHigh];
}

// Proportionally scales the image so that its longest side is maxSize,
// returning the receiver untouched when it already fits.
-(UIImage*)scaleWithMaxSize:(CGFloat)maxSize
                    quality:(CGInterpolationQuality)quality
{
    CGRect bnds = CGRectZero;     // target bounds
    UIImage* copy = nil;
    CGContextRef ctxt = nil;
    CGRect orig = CGRectZero;     // source bounds
    CGFloat rtio = 0.0;           // width / height aspect ratio
    CGFloat scal = 1.0;

    bnds.size = self.size;
    orig.size = self.size;
    rtio = orig.size.width / orig.size.height;

    if ((orig.size.width <= maxSize) && (orig.size.height <= maxSize))
    {
        // already small enough -- avoid an unnecessary redraw
        return self;
    }

    if (rtio > 1.0)
    {
        // landscape: clamp the width
        bnds.size.width = maxSize;
        bnds.size.height = maxSize / rtio;
    }
    else
    {
        // portrait or square: clamp the height
        bnds.size.width = maxSize * rtio;
        bnds.size.height = maxSize;
    }

    UIGraphicsBeginImageContext(bnds.size);
    ctxt = UIGraphicsGetCurrentContext();

    scal = bnds.size.width / orig.size.width;

    CGContextSetInterpolationQuality(ctxt, quality);
    // Flip vertically (CG vs. UIKit coordinates) while scaling down.
    CGContextScaleCTM(ctxt, scal, -scal);
    CGContextTranslateCTM(ctxt, 0.0, -orig.size.height);
    CGContextDrawImage(ctxt, orig, self.CGImage);   // full decode of source

    copy = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return copy;
}

@end

I ended up using imageIO, less memory!
// Loads the image at |path| and returns a copy whose largest dimension is
// at most |dimension| pixels, honoring the EXIF orientation. Uses ImageIO
// thumbnailing, which decodes directly at the target size and therefore
// never materializes the full-resolution bitmap in memory.
// Fixes vs. original: guard against a NULL image source (unreadable path)
// and a NULL thumbnail before calling CFRelease / imageWithCGImage:, which
// would otherwise crash.
// NOTE(review): the "withPaht" selector typo is preserved deliberately --
// renaming it would break existing callers.
-(UIImage *)resizeImageToMaxDimension: (float) dimension withPaht: (NSString *)path
{
    NSURL *imageUrl = [NSURL fileURLWithPath:path];
    CGImageSourceRef imageSource = CGImageSourceCreateWithURL((__bridge CFURLRef)imageUrl, NULL);
    if (imageSource == NULL) {
        return nil;   // file missing or unreadable
    }
    NSDictionary *thumbnailOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                                      (id)kCFBooleanTrue, kCGImageSourceCreateThumbnailWithTransform,
                                      kCFBooleanTrue, kCGImageSourceCreateThumbnailFromImageAlways,
                                      [NSNumber numberWithFloat:dimension], kCGImageSourceThumbnailMaxPixelSize,
                                      nil];
    CGImageRef thumbnail = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, (__bridge CFDictionaryRef)thumbnailOptions);
    CFRelease(imageSource);
    if (thumbnail == NULL) {
        return nil;   // corrupt data or unsupported format
    }
    UIImage *resizedImage = [UIImage imageWithCGImage:thumbnail];
    CGImageRelease(thumbnail);   // Create rule: we own this reference
    return resizedImage;
}

That's correct — it comes from the picture that you shot with your camera. Older devices use cameras with lower resolution, which means that images taken with an iPhone 3G are smaller in resolution (and thus size) than the ones you get on your iPhone 4S. Images are usually stored compressed, but when they are opened in memory for any kind of operation they must be decompressed, and the memory they then need is much bigger than the file size: roughly number_of_pixels_per_row * number_of_pixel_rows * bytes_per_pixel.
Bye,
Andrea

A note on a commonly suggested "fix": do not insert `CGContextRelease(ctxt);` before the `return copy;` in these methods. The context returned by `UIGraphicsGetCurrentContext()` is not owned by the caller, so releasing it over-releases the context and can crash; `UIGraphicsEndImageContext()`, which the methods already call, is what correctly tears the context down.

Related

UIImageView set image orienation after setting from camera roll

Image Orientation changes after setting it from camera roll to UIImage.
// Present the system camera UI. allowsEditing is NO, so the delegate
// receives the untouched original image; mediaTypes is every media type
// the camera source supports.
UIImagePickerController *controller = [[UIImagePickerController alloc] init];
controller.sourceType = UIImagePickerControllerSourceTypeCamera;
controller.allowsEditing = NO;
controller.mediaTypes = [UIImagePickerController availableMediaTypesForSourceType: UIImagePickerControllerSourceTypeCamera];
controller.delegate = self;
[self.navigationController presentViewController: controller animated: YES completion: nil];
// UIImagePickerControllerDelegate: display the picked photo and persist it
// to the Documents directory.
// NOTE(review): #"profileImage.png" restored to the @"..." literal so this
// compiles. Also note the displayed image goes through a heavily compressed
// (quality 0.1) JPEG round-trip, while the file on disk is full-quality PNG.
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info{
    [self.navigationController dismissViewControllerAnimated: YES completion: nil];
    UIImage *image = [info valueForKey: UIImagePickerControllerOriginalImage];
    NSData *imageData = UIImageJPEGRepresentation(image, 0.1);
    headerCell.userProfileImageView.image = [UIImage imageWithData:imageData];
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsPath = [paths objectAtIndex:0]; //Get the docs directory
    NSString *filePath = [documentsPath stringByAppendingPathComponent:@"profileImage.png"]; //Add the file name
    NSData *pngData = UIImagePNGRepresentation(image);
    [pngData writeToFile:filePath atomically:YES];
}
From Camera Roll its set orientation wrong but from photo library its setting perfectly.
Any one had same issue like this please let us know your comments to fix this issue.
From thread here and below answer(by Dilip Rajkumar):
https://stackoverflow.com/a/10602363/5575752
You can use below method to manage image orientation after picking image:
(Modify method as per your requirement)
// Variant that first normalizes the picked photo with scaleAndRotateImage:
// so the stored/displayed image has its EXIF orientation baked in.
// NOTE(review): #"profileImage.png" restored to the @"..." literal so this
// compiles.
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info{
    [self.navigationController dismissViewControllerAnimated: YES completion: nil];
    UIImage *image = [self scaleAndRotateImage: [info objectForKey:UIImagePickerControllerOriginalImage]];
    NSData *imageData = UIImageJPEGRepresentation(image, 0.1);
    headerCell.userProfileImageView.image = [UIImage imageWithData:imageData];
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsPath = [paths objectAtIndex:0]; //Get the docs directory
    NSString *filePath = [documentsPath stringByAppendingPathComponent:@"profileImage.png"]; //Add the file name
    NSData *pngData = UIImagePNGRepresentation(image);
    [pngData writeToFile:filePath atomically:YES];
}
// Scales |image| down so that its longest side is at most kMaxResolution
// (320) while baking the EXIF orientation into the pixels, so the result
// is always UIImageOrientationUp.
// NOTE(review): #"Invalid image orientation" restored to @"..." so the
// NSException format string compiles; transform logic is unchanged.
- (UIImage *)scaleAndRotateImage:(UIImage *) image {
    int kMaxResolution = 320;
    CGImageRef imgRef = image.CGImage;
    // CGImage dimensions are the raw pixel buffer size, independent of
    // image.imageOrientation.
    CGFloat width = CGImageGetWidth(imgRef);
    CGFloat height = CGImageGetHeight(imgRef);
    CGAffineTransform transform = CGAffineTransformIdentity;
    CGRect bounds = CGRectMake(0, 0, width, height);
    if (width > kMaxResolution || height > kMaxResolution) {
        // Shrink proportionally so the longest side is kMaxResolution.
        CGFloat ratio = width/height;
        if (ratio > 1) {
            bounds.size.width = kMaxResolution;
            bounds.size.height = bounds.size.width / ratio;
        }
        else {
            bounds.size.height = kMaxResolution;
            bounds.size.width = bounds.size.height * ratio;
        }
    }
    CGFloat scaleRatio = bounds.size.width / width;
    CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
    CGFloat boundHeight;
    UIImageOrientation orient = image.imageOrientation;
    switch(orient) {
        case UIImageOrientationUp: //EXIF = 1
            transform = CGAffineTransformIdentity;
            break;
        case UIImageOrientationUpMirrored: //EXIF = 2
            transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            break;
        case UIImageOrientationDown: //EXIF = 3
            transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;
        case UIImageOrientationDownMirrored: //EXIF = 4
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
            transform = CGAffineTransformScale(transform, 1.0, -1.0);
            break;
        case UIImageOrientationLeftMirrored: //EXIF = 5
            // 90-degree cases swap the output bounds.
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;
        case UIImageOrientationLeft: //EXIF = 6
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;
        case UIImageOrientationRightMirrored: //EXIF = 7
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeScale(-1.0, 1.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;
        case UIImageOrientationRight: //EXIF = 8
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;
        default:
            [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
    }
    UIGraphicsBeginImageContext(bounds.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // Flip CG coordinates to UIKit's, then apply the orientation transform.
    if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
        CGContextScaleCTM(context, -scaleRatio, scaleRatio);
        CGContextTranslateCTM(context, -height, 0);
    }
    else {
        CGContextScaleCTM(context, scaleRatio, -scaleRatio);
        CGContextTranslateCTM(context, 0, -height);
    }
    CGContextConcatCTM(context, transform);
    // Full decode of the source happens here (the big transient allocation).
    CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
    UIImage *imageCopy = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return imageCopy;
}

effective way to generate thumbnail from uiimage

I have an UICollectionView and each item shows a square image.
Each image is big file (> 3MB) and every time app try to jump into this view. It will delay 2-3 seconds.
I tried to create a thumbnail from the big file and then apply it to each collection item, but it does not seem to save time.
Is there any effective way ?
Follow is the method I use to create thumbnail
// Resizes the receiver to dstSize while baking the EXIF orientation into
// the pixels. For camera UIImages the underlying CGImage is stored in its
// raw (landscape) layout, so srcSize here is orientation-independent,
// unlike self.size.
// NOTE(review): #"Invalid image orientation" restored to @"..." so the
// NSException format string compiles; transform logic is unchanged.
-(UIImage*)resizedImageToSize:(CGSize)dstSize{
    CGImageRef imgRef = self.CGImage;
    // the below values are regardless of orientation : for UIImages from Camera, width>height (landscape)
    CGSize srcSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef)); // not equivalent to self.size (which is dependant on the imageOrientation)!
    /* Don't resize if we already meet the required destination size. */
    if (CGSizeEqualToSize(srcSize, dstSize)) {
        return self;
    }
    CGFloat scaleRatio = dstSize.width / srcSize.width;
    UIImageOrientation orient = self.imageOrientation;
    CGAffineTransform transform = CGAffineTransformIdentity;
    switch(orient) {
        case UIImageOrientationUp: //EXIF = 1
            transform = CGAffineTransformIdentity;
            break;
        case UIImageOrientationUpMirrored: //EXIF = 2
            transform = CGAffineTransformMakeTranslation(srcSize.width, 0.0);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            break;
        case UIImageOrientationDown: //EXIF = 3
            transform = CGAffineTransformMakeTranslation(srcSize.width, srcSize.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;
        case UIImageOrientationDownMirrored: //EXIF = 4
            transform = CGAffineTransformMakeTranslation(0.0, srcSize.height);
            transform = CGAffineTransformScale(transform, 1.0, -1.0);
            break;
        case UIImageOrientationLeftMirrored: //EXIF = 5
            // 90-degree cases swap the destination size.
            dstSize = CGSizeMake(dstSize.height, dstSize.width);
            transform = CGAffineTransformMakeTranslation(srcSize.height, srcSize.width);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI_2);
            break;
        case UIImageOrientationLeft: //EXIF = 6
            dstSize = CGSizeMake(dstSize.height, dstSize.width);
            transform = CGAffineTransformMakeTranslation(0.0, srcSize.width);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI_2);
            break;
        case UIImageOrientationRightMirrored: //EXIF = 7
            dstSize = CGSizeMake(dstSize.height, dstSize.width);
            transform = CGAffineTransformMakeScale(-1.0, 1.0);
            transform = CGAffineTransformRotate(transform, M_PI_2);
            break;
        case UIImageOrientationRight: //EXIF = 8
            dstSize = CGSizeMake(dstSize.height, dstSize.width);
            transform = CGAffineTransformMakeTranslation(srcSize.height, 0.0);
            transform = CGAffineTransformRotate(transform, M_PI_2);
            break;
        default:
            [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
    }
    /////////////////////////////////////////////////////////////////////////////
    // The actual resize: draw the image on a new context, applying a transform matrix
    UIGraphicsBeginImageContextWithOptions(dstSize, NO, self.scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    if (!context) {
        return nil;
    }
    if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
        CGContextScaleCTM(context, -scaleRatio, scaleRatio);
        CGContextTranslateCTM(context, -srcSize.height, 0);
    } else {
        CGContextScaleCTM(context, scaleRatio, -scaleRatio);
        CGContextTranslateCTM(context, 0, -srcSize.height);
    }
    CGContextConcatCTM(context, transform);
    // we use srcSize (and not dstSize) as the size to specify is in user space (and we use the CTM to apply a scaleRatio)
    CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, srcSize.width, srcSize.height), imgRef);
    UIImage* resizedImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return resizedImage;
}
There's a built in way to create thumbnails from compressed image files: directly using ImageIO. It is lightning fast compared to rendering the image into a smaller bitmap context.
Here's a method that does the job:
// Creates a thumbnail directly from the compressed file via ImageIO --
// far faster and far lighter on memory than decoding the full image and
// redrawing it into a smaller bitmap context.
// NOTE(review): '#'-mangled literals (#"...", #{...}, #(...)) restored to
// their @-prefixed forms so this compiles; also guard against a NULL
// thumbnail before wrapping it in a UIImage.
+ (UIImage *)thumbnailWithContentsOfURL:(NSURL *)URL maxPixelSize:(CGFloat)maxPixelSize
{
    CGImageSourceRef imageSource = CGImageSourceCreateWithURL((__bridge CFURLRef)URL, NULL);
    NSAssert(imageSource != NULL, @"cannot create image source");
    NSDictionary *imageOptions = @{
        (NSString const *)kCGImageSourceCreateThumbnailFromImageIfAbsent : (NSNumber const *)kCFBooleanTrue,
        (NSString const *)kCGImageSourceThumbnailMaxPixelSize : @(maxPixelSize),
        (NSString const *)kCGImageSourceCreateThumbnailWithTransform : (NSNumber const *)kCFBooleanTrue
    };
    CGImageRef thumbnail = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, (__bridge CFDictionaryRef)imageOptions);
    CFRelease(imageSource);
    if (thumbnail == NULL) {
        return nil;   // corrupt data or unsupported format
    }
    UIImage *result = [[UIImage alloc] initWithCGImage:thumbnail];
    CGImageRelease(thumbnail);   // Create rule: we own this reference
    return result;
}

Screenshot code working perfectly in Simulator but not in iOS Device

This is my code for taking a screenshot to save to the library or send by email. The problem is that it works perfectly in the Simulator, but when I run this code on a device, whatever is on screen, it only captures a white image. I have tried with both iOS 5 and iOS 6 but had no luck.
What could be the reason? Where am I wrong?
// Capture a 320x430 region of the OpenGL framebuffer (starting at y=50)
// into a UIImage, then mail it or save it to the photo library.
NSInteger myDataLength = 320 * 430 * 4;                 // RGBA, 8 bits per channel
GLubyte *buffer1 = (GLubyte *) malloc(myDataLength);
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
//Read image memory from OpenGL (rows are bottom-up in GL)
glReadPixels(0, 50, 320, 430, GL_RGBA, GL_UNSIGNED_BYTE, buffer1);
//Flip vertically into the secondary buffer so rows are top-down for CoreGraphics
for(int y = 0; y < 430; y++) {
    for(int x = 0; x < 320 * 4; x++) {
        buffer2[(429 - y) * 320 * 4 + x] = buffer1[y * 4 * 320 + x];
    }
}
//Create a bitmap context backed directly by buffer2
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef destContext = CGBitmapContextCreate(buffer2, 320, 430, 8, 320 * 4, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
//Get image from context
CGImageRef resultContext = CGBitmapContextCreateImage(destContext);
UIImage *resultImg = [UIImage imageWithCGImage: resultContext];
CGImageRelease(resultContext);
//Send to mail or Save to PhotoLibrary
if (sendMail) {
    [self emailImage: resultImg];
} else {
    UIImageWriteToSavedPhotosAlbum(resultImg, nil, nil, nil);
}
//Release allocated memory.
// NOTE(review): release the context *before* freeing buffer2 -- the
// context is backed by buffer2, and the original order (free first)
// briefly left the live context pointing at freed memory.
CGContextRelease(destContext);
CGColorSpaceRelease(colorSpace);
free(buffer2);
free(buffer1);
is there any class i am using which is supported by Simulator and not the device If it so then which one because as far as i search i found nothing like that.
I have found that my above code is correct and the issue does not lie there. Actually, the problem is with
eaglLayer.drawableProperties
I have changed from this code
// Original (broken on device, per the author's finding): with retained
// backing disabled the layer's contents are not kept after presentation,
// and the later glReadPixels capture came back white.
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:NO],
kEAGLDrawablePropertyRetainedBacking,
kEAGLColorFormatRGB565,
kEAGLDrawablePropertyColorFormat, nil];
To this
// Fixed: retained backing (YES) keeps the rendered contents around so the
// glReadPixels capture works on the device as well as the Simulator.
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES],
kEAGLDrawablePropertyRetainedBacking,
kEAGLColorFormatRGB565,
kEAGLDrawablePropertyColorFormat, nil];
I just set
kEAGLDrawablePropertyRetainedBacking = YES
And thanks GOD it is working fine.
but dont know why if any one know please let me know
try this one, it works perfect for me on the device:
// Renders |view| (or its full contentSize, for scroll views) into an image,
// crops it to |rect| (view points, converted to pixels with the screen
// scale), and returns the crop scaled back to rect's point size.
// Fixes vs. original: '@selector' was mangled to '#selector', and the
// CGImageRef returned by CGImageCreateWithImageInRect() (a Create-rule
// reference) was never released, leaking one bitmap per call.
+ (UIImage *)imageFromView:(UIView *)view inRect:(CGRect)rect {
    CGFloat screenScale = [[UIScreen mainScreen] scale];
    if ([view respondsToSelector:@selector(contentSize)]) {
        // Scroll views are rendered at their full content size, not just
        // the visible bounds.
        UIScrollView *scrollView = (UIScrollView *)view;
        UIGraphicsBeginImageContextWithOptions(scrollView.contentSize, NO, screenScale);
    } else {
        UIGraphicsBeginImageContextWithOptions(view.bounds.size, NO, screenScale);
    }
    CGContextRef resizedContext = UIGraphicsGetCurrentContext();
    [view.layer renderInContext:resizedContext];
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    CGImageRef croppedImage = CGImageCreateWithImageInRect(image.CGImage, CGRectMake(rect.origin.x*screenScale, rect.origin.y*screenScale, rect.size.width*screenScale, rect.size.height*screenScale));
    image = [UIImage imageWithCGImage:croppedImage];
    CGImageRelease(croppedImage);
    return [image imageScaledToSize:CGSizeMake(rect.size.width, rect.size.height)];
}
/// Returns the receiver redrawn at |newSize|, or the receiver itself when
/// it already has exactly that size.
- (UIImage *)imageScaledToSize:(CGSize)newSize {
    BOOL alreadyFits = (self.size.width == newSize.width) && (self.size.height == newSize.height);
    if (alreadyFits) {
        return self;
    }
    // opaque = NO; scale 0.0 means "use the device screen scale"
    UIGraphicsBeginImageContextWithOptions(newSize, NO, 0.0);
    [self drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
    UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return scaledImage;
}

Rotate, Change Colors and Get RGB565 data from NSImage

I have found myself in a situation where I have several NSImage objects that I need to rotate by 90 degrees, change the colour of pixels that are one colour to another colour and then get the RGB565 data representation for it as an NSData object.
I found the vImageConvert_ARGB8888toRGB565 function in the Accelerate framework so this should be able to do the RGB565 output.
There are a few UIImage rotation I have found here on StackOverflow, but I'm having trouble converting them to NSImage as it appears I have to use NSGraphicsContext not CGContextRef?
Ideally I would like these in an NSImage Category so I can just call.
NSImage *rotated = [inputImage rotateByDegrees:90];
NSImage *colored = [rotated changeColorFrom:[NSColor redColor] toColor:[NSColor blackColor]];
NSData *rgb565 = [colored rgb565Data];
I just don't know where to start as image manipulation is new to me.
I appreciate any help I can get.
Edit (22/04/2013)
I have managed to piece this code together to generate the RGB565 data. It generates the image upside down and with some small artefacts; I assume the first is due to the different coordinate systems being used, and the second is possibly due to me going from PNG to BMP. I will do some more testing, starting with a BMP and also a non-transparent PNG.
// Renders the receiver into a fresh ARGB8888 bitmap, converts it to RGB565
// with Accelerate, and returns the packed pixel data.
// Fix vs. original: the backing buffer malloc'ed inside
// CreateARGBBitmapContext() (retrievable via CGBitmapContextGetData) was
// never freed, leaking width*height*4 bytes on every call.
- (NSData *)RGB565Data
{
    CGContextRef cgctx = CreateARGBBitmapContext(self.CGImage);
    if (cgctx == NULL)
        return nil;
    size_t w = CGImageGetWidth(self.CGImage);
    size_t h = CGImageGetHeight(self.CGImage);
    CGRect rect = {{0,0},{w,h}};
    // Decode the image into the ARGB buffer.
    CGContextDrawImage(cgctx, rect, self.CGImage);
    void *data = CGBitmapContextGetData (cgctx);
    // Releasing the context does NOT free |data|: the buffer was supplied
    // to CGBitmapContextCreate by CreateARGBBitmapContext, so we own it.
    CGContextRelease(cgctx);
    if (!data)
        return nil;
    vImage_Buffer src;
    src.data = data;
    src.width = w;
    src.height = h;
    src.rowBytes = (w * 4);
    void* destData = malloc((w * 2) * h);
    vImage_Buffer dst;
    dst.data = destData;
    dst.width = w;
    dst.height = h;
    dst.rowBytes = (w * 2);
    vImageConvert_ARGB8888toRGB565(&src, &dst, 0);
    size_t dataSize = 2 * w * h; // RGB565 = 2 5-bit components and 1 6-bit (16 bits/2 bytes)
    NSData *RGB565Data = [NSData dataWithBytes:dst.data length:dataSize];
    free(destData);
    free(data);   // was leaked in the original
    return RGB565Data;
}
// Bridges the NSImage to a CGImage using the current graphics context for
// rendering hints. Get rule: the caller does not own the returned image.
- (CGImageRef)CGImage
{
return [self CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil];
}
// Creates an ARGB8888 bitmap context sized to hold |inImage|.
// Ownership: Create rule -- the caller must CGContextRelease() the result
// AND free() the pixel buffer (obtained via CGBitmapContextGetData) when
// finished.
// Fix vs. original: a C function returning CGContextRef should return
// NULL, not the Objective-C 'nil', on failure.
CGContextRef CreateARGBBitmapContext (CGImageRef inImage)
{
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    void * bitmapData;
    int bitmapByteCount;
    int bitmapBytesPerRow;
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    bitmapBytesPerRow = (int)(pixelsWide * 4);   // 4 bytes per ARGB pixel
    bitmapByteCount = (int)(bitmapBytesPerRow * pixelsHigh);
    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
        return NULL;
    bitmapData = malloc( bitmapByteCount );
    if (bitmapData == NULL)
    {
        CGColorSpaceRelease( colorSpace );
        return NULL;
    }
    context = CGBitmapContextCreate (bitmapData,
                                     pixelsWide,
                                     pixelsHigh,
                                     8,            // bits per component
                                     bitmapBytesPerRow,
                                     colorSpace,
                                     kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        // Context creation failed: reclaim the buffer; NULL is returned.
        free (bitmapData);
        fprintf (stderr, "Context not created!");
    }
    CGColorSpaceRelease( colorSpace );
    return context;
}
For most of this, you'll want to use Core Image.
Rotation you can do with the CIAffineTransform filter. This takes an NSAffineTransform object. You may have already worked with that class before. (You could do the rotation with NSImage itself, but it's easier with Core Image and you'll probably need to use it for the next step anyway.)
I don't know what you mean by “change the colour of pixels that are one colour to another colour”; that could mean any of a lot of different things. Chances are, though, there's a filter for that.
I also don't know why you need 565 data specifically, but assuming you have a real need for that, you're correct that that function will be involved. Use CIContext's lowest-level rendering method to get 8-bit-per-component ARGB output, and then use that vImage function to convert it to 565 RGB.
I have managed to get what I want by using NSBitmapImageRep (accessing it with a bit of a hack). If anyone knows a better way of doing this, please do share.
The - (NSBitmapImageRep)bitmap method is my hack. The NSImage starts of having only an NSBitmapImageRep, however after the rotation method a CIImageRep is added which takes priority over the NSBitmapImageRep which breaks the colour code (as NSImage renders the CIImageRep which doesn't get colored).
BitmapImage.m (Subclass of NSImage)
// Creates an ARGB8888 bitmap context sized to hold |inImage|.
// Ownership: Create rule -- the caller must CGContextRelease() the result
// AND free() the pixel buffer (obtained via CGBitmapContextGetData) when
// finished.
// Fix vs. original: a C function returning CGContextRef should return
// NULL, not the Objective-C 'nil', on failure.
CGContextRef CreateARGBBitmapContext (CGImageRef inImage)
{
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    void * bitmapData;
    int bitmapByteCount;
    int bitmapBytesPerRow;
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    bitmapBytesPerRow = (int)(pixelsWide * 4);   // 4 bytes per ARGB pixel
    bitmapByteCount = (int)(bitmapBytesPerRow * pixelsHigh);
    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
        return NULL;
    bitmapData = malloc( bitmapByteCount );
    if (bitmapData == NULL)
    {
        CGColorSpaceRelease( colorSpace );
        return NULL;
    }
    context = CGBitmapContextCreate (bitmapData,
                                     pixelsWide,
                                     pixelsHigh,
                                     8,            // bits per component
                                     bitmapBytesPerRow,
                                     colorSpace,
                                     kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        // Context creation failed: reclaim the buffer; NULL is returned.
        free (bitmapData);
        fprintf (stderr, "Context not created!");
    }
    CGColorSpaceRelease( colorSpace );
    return context;
}
// Renders the receiver into a fresh ARGB8888 bitmap, converts it to RGB565
// with Accelerate, and returns the packed pixel data.
// Fix vs. original: the backing buffer malloc'ed inside
// CreateARGBBitmapContext() (retrievable via CGBitmapContextGetData) was
// never freed, leaking width*height*4 bytes on every call.
- (NSData *)RGB565Data
{
    CGContextRef cgctx = CreateARGBBitmapContext(self.CGImage);
    if (cgctx == NULL)
        return nil;
    size_t w = CGImageGetWidth(self.CGImage);
    size_t h = CGImageGetHeight(self.CGImage);
    CGRect rect = {{0,0},{w,h}};
    // Decode the image into the ARGB buffer.
    CGContextDrawImage(cgctx, rect, self.CGImage);
    void *data = CGBitmapContextGetData (cgctx);
    // Releasing the context does NOT free |data|: the buffer was supplied
    // to CGBitmapContextCreate by CreateARGBBitmapContext, so we own it.
    CGContextRelease(cgctx);
    if (!data)
        return nil;
    vImage_Buffer src;
    src.data = data;
    src.width = w;
    src.height = h;
    src.rowBytes = (w * 4);
    void* destData = malloc((w * 2) * h);
    vImage_Buffer dst;
    dst.data = destData;
    dst.width = w;
    dst.height = h;
    dst.rowBytes = (w * 2);
    vImageConvert_ARGB8888toRGB565(&src, &dst, 0);
    size_t dataSize = 2 * w * h; // RGB565 = 2 5-bit components and 1 6-bit (16 bits/2 bytes)
    NSData *RGB565Data = [NSData dataWithBytes:dst.data length:dataSize];
    free(destData);
    free(data);   // was leaked in the original
    return RGB565Data;
}
// Returns the NSBitmapImageRep backing this image, removing any other rep
// types (e.g. the CIImageRep added by the rotation code) so that pixel
// edits made through the bitmap actually show up when the image renders.
// Manual retain/release: this file predates ARC.
- (NSBitmapImageRep*)bitmap
{
NSBitmapImageRep *bitmap = nil;
NSMutableArray *repsToRemove = [NSMutableArray array];
// Iterate through the representations that back the NSImage
for (NSImageRep *rep in self.representations)
{
// If the representation is a bitmap
if ([rep isKindOfClass:[NSBitmapImageRep class]])
{
bitmap = [(NSBitmapImageRep*)rep retain];
break;
}
else
{
[repsToRemove addObject:rep];
}
}
// If no bitmap representation was found, we create one (this shouldn't occur)
// NOTE(review): alloc already yields an owned reference, so the extra
// -retain here over-retains the rep (balanced only once by the
// -autorelease below) -- looks like a small MRC leak; verify.
if (bitmap == nil)
{
bitmap = [[[NSBitmapImageRep alloc] initWithCGImage:self.CGImage] retain];
[self addRepresentation:bitmap];
}
// Drop the non-bitmap reps collected above so the bitmap rep becomes the
// one NSImage actually draws.
// NOTE(review): reps listed *after* the bitmap are never collected
// because of the 'break' above -- presumably intentional; verify.
for (NSImageRep *rep2 in repsToRemove)
{
[self removeRepresentation:rep2];
}
return [bitmap autorelease];
}
// Convenience: reads the pixel color at (x, y) from the bitmap rep.
- (NSColor*)colorAtX:(NSInteger)x y:(NSInteger)y
{
return [self.bitmap colorAtX:x y:y];
}
// Convenience: writes |color| into the pixel at (x, y) of the bitmap rep.
- (void)setColor:(NSColor*)color atX:(NSInteger)x y:(NSInteger)y
{
[self.bitmap setColor:color atX:x y:y];
}
NSImage+Extra.m (NSImage Category)
- (CGImageRef)CGImage
{
return [self CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil];
}
Usage
// Open-panel action: loads each chosen file as a BitmapImage and shows it
// in the image view.
// Fix vs. original (MRC file): the alloc'ed image was never released --
// imageView retains its own reference, so release ours after handing it over.
// NOTE(review): -runModalForDirectory:file: and -filenames are deprecated;
// kept here to avoid changing behavior.
- (IBAction)load:(id)sender
{
    NSOpenPanel* openDlg = [NSOpenPanel openPanel];
    [openDlg setCanChooseFiles:YES];
    [openDlg setCanChooseDirectories:YES];
    if ( [openDlg runModalForDirectory:nil file:nil] == NSOKButton )
    {
        NSArray* files = [openDlg filenames];
        for( int i = 0; i < [files count]; i++ )
        {
            NSString* fileName = [files objectAtIndex:i];
            BitmapImage *image = [[BitmapImage alloc] initWithContentsOfFile:fileName];
            imageView.image = image;
            [image release];   // was leaked in the original
        }
    }
}
// Replaces every pixel whose color matches the one sampled at (1,1) with red.
// Fixes vs. original: NSColor objects were compared with '==' (pointer
// identity, which is never true for two distinct instances), and the loops
// ran to '<= width'/'<= height', reading one column and one row out of
// bounds.
- (IBAction)colorize:(id)sender
{
    float width = imageView.image.size.width;
    float height = imageView.image.size.height;
    BitmapImage *img = (BitmapImage*)imageView.image;
    NSColor *targetColor = [img colorAtX:1 y:1];
    for (int x = 0; x < width; x++)
    {
        for (int y = 0; y < height; y++)
        {
            if ([[img colorAtX:x y:y] isEqual:targetColor])
            {
                [img setColor:[NSColor redColor] atX:x y:y];
            }
        }
    }
    [imageView setNeedsDisplay:YES];
}
// Rotates the displayed image 90 degrees (NO = counter-clockwise) and
// swaps it into the image view.
- (IBAction)rotate:(id)sender
{
BitmapImage *img = (BitmapImage*)imageView.image;
BitmapImage *newImg = [img rotate90DegreesClockwise:NO];
imageView.image = newImg;
}
Edit (24/04/2013)
I have changed the following code:
// Reads the RGB components of the pixel at (x, y) into an RGBColor struct.
// Assumes the rep stores components in R,G,B order with no alpha; see the
// later hasAlpha-aware revision in this document for the general case.
- (RGBColor)colorAtX:(NSInteger)x y:(NSInteger)y
{
NSUInteger components[4];
[self.bitmap getPixel:components atX:x y:y];
//NSLog(@"R: %ld, G:%ld, B:%ld", components[0], components[1], components[2]);
RGBColor color = {components[0], components[1], components[2]};
return color;
}
// YES when the two colors have identical red, green and blue components.
- (BOOL)color:(RGBColor)a isEqualToColor:(RGBColor)b
{
    if (a.red != b.red) return NO;
    if (a.green != b.green) return NO;
    return a.blue == b.blue;
}
// Writes an R,G,B,A pixel at (x, y), forcing the alpha component to 255
// (fully opaque).
- (void)setColor:(RGBColor)color atX:(NSUInteger)x y:(NSUInteger)y
{
NSUInteger components[4] = {(NSUInteger)color.red, (NSUInteger)color.green, (NSUInteger)color.blue, 255};
//NSLog(@"R: %ld, G: %ld, B: %ld", components[0], components[1], components[2]);
[self.bitmap setPixel:components atX:x y:y];
}
// Replaces every pixel matching the raw component values at (0,0) with
// opaque red, comparing RGBColor structs instead of NSColor objects.
// Fix vs. original: the loops ran to '<= width'/'<= height', touching one
// column and one row outside the bitmap.
- (IBAction)colorize:(id)sender
{
    float width = imageView.image.size.width;
    float height = imageView.image.size.height;
    BitmapImage *img = (BitmapImage*)imageView.image;
    RGBColor oldColor = [img colorAtX:0 y:0];
    RGBColor newColor;   // opaque red
    newColor.red = 255;
    newColor.green = 0;
    newColor.blue = 0;
    for (int x = 0; x < width; x++)
    {
        for (int y = 0; y < height; y++)
        {
            if ([img color:[img colorAtX:x y:y] isEqualToColor:oldColor])
            {
                [img setColor:newColor atX:x y:y];
            }
        }
    }
    [imageView setNeedsDisplay:YES];
}
But now it changes the pixels to red the first time and then blue the second time the colorize method is called.
Edit 2 (24/04/2013)
The following code fixes it. It was because the rotation code was adding an alpha channel to the NSBitmapImageRep.
// Reads the RGB components of the pixel at (x, y), accounting for the
// rep's channel layout: when the rep has alpha this code treats component
// 0 as alpha and reads R,G,B from components 1..3 (matching what the
// rotation code produced -- verify against the rep's bitmapFormat);
// without alpha the components are R,G,B.
- (RGBColor)colorAtX:(NSInteger)x y:(NSInteger)y
{
if (self.bitmap.hasAlpha)
{
NSUInteger components[4];
[self.bitmap getPixel:components atX:x y:y];
// skip components[0] (alpha)
RGBColor color = {components[1], components[2], components[3]};
return color;
}
else
{
NSUInteger components[3];
[self.bitmap getPixel:components atX:x y:y];
RGBColor color = {components[0], components[1], components[2]};
return color;
}
}
// Writes the pixel at (x, y) using the same channel layout the reader
// above assumes: with alpha the components are written as A,R,G,B with
// alpha forced to 255 (opaque); without alpha they are R,G,B.
- (void)setColor:(RGBColor)color atX:(NSUInteger)x y:(NSUInteger)y
{
if (self.bitmap.hasAlpha)
{
NSUInteger components[4] = {255, (NSUInteger)color.red, (NSUInteger)color.green, (NSUInteger)color.blue};
[self.bitmap setPixel:components atX:x y:y];
}
else
{
NSUInteger components[3] = {color.red, color.green, color.blue};
[self.bitmap setPixel:components atX:x y:y];
}
}
Ok, I decided to spend the day researching Peter's suggestion of using CoreImage.
I had done some research previously and decided it was too hard but after an entire day of research I finally worked out what I needed to do and amazingly it couldn't be easier.
Early on I had decided that the Apple ChromaKey Core Image example would be a great starting point but the example code frightened me off due to the 3-dimensional colour cube. After watching the WWDC 2012 video on Core Image and finding some sample code on github (https://github.com/vhbit/ColorCubeSample) I decided to jump in and just give it a go.
Here are the important parts of the working code, I haven't included the RGB565Data method as I haven't written it yet, but it should be easy using the method Peter suggested:
CIImage+Extras.h
// Renders the CIImage through the current NSGraphicsContext into an NSImage.
- (NSImage*) NSImage;
// Returns the receiver rotated 90 degrees (clockwise when YES), shifted
// back so its extent origin is (0,0).
- (CIImage*) imageRotated90DegreesClockwise:(BOOL)clockwise;
// Makes pixels matching chromaColor transparent and composites the result
// over a solid backColor background.
- (CIImage*) imageWithChromaColor:(NSColor*)chromaColor BackgroundColor:(NSColor*)backColor;
// Samples one pixel's color (renders the whole image into a bitmap rep).
- (NSColor*) colorAtX:(NSUInteger)x y:(NSUInteger)y;
CIImage+Extras.m
// Renders the CIImage into a CGImage via a CIContext built on the current
// NSGraphicsContext, and wraps it in an autoreleased NSImage (MRC file).
// Fix vs. original: -createCGImage:fromRect: follows the Create rule, so
// the CGImageRef must be released after NSImage takes its own reference --
// it was leaked before.
- (NSImage*) NSImage
{
    CGContextRef cg = [[NSGraphicsContext currentContext] graphicsPort];
    CIContext *context = [CIContext contextWithCGContext:cg options:nil];
    CGImageRef cgImage = [context createCGImage:self fromRect:self.extent];
    NSImage *image = [[NSImage alloc] initWithCGImage:cgImage size:NSZeroSize];
    CGImageRelease(cgImage);   // was leaked in the original
    return [image autorelease];
}
// Rotates the image 90 degrees (clockwise when |clockwise| is YES) with a
// CIAffineTransform, then applies a second transform that shifts the
// rotated extent back so its origin is (0,0).
// NOTE(review): '#"..."' literals restored to '@"..."' so this compiles.
- (CIImage*) imageRotated90DegreesClockwise:(BOOL)clockwise
{
    CIImage *im = self;
    CIFilter *f = [CIFilter filterWithName:@"CIAffineTransform"];
    NSAffineTransform *t = [NSAffineTransform transform];
    [t rotateByDegrees:clockwise ? -90 : 90];
    [f setValue:t forKey:@"inputTransform"];
    [f setValue:im forKey:@"inputImage"];
    im = [f valueForKey:@"outputImage"];
    // The rotation moves the extent off the origin; translate it back.
    CGRect extent = [im extent];
    f = [CIFilter filterWithName:@"CIAffineTransform"];
    t = [NSAffineTransform transform];
    [t translateXBy:-extent.origin.x
                yBy:-extent.origin.y];
    [f setValue:t forKey:@"inputTransform"];
    [f setValue:im forKey:@"inputImage"];
    im = [f valueForKey:@"outputImage"];
    return im;
}
// Returns a copy where pixels exactly matching chromaColor become
// transparent (via a CIColorCube lookup) and the keyed image is composited
// over a solid backColor background.
// NOTE(review): '#"..."' literals restored to '@"..."' so this compiles.
// Also note the cube only zeroes lattice points *exactly* equal to the
// chroma color (float == comparison); colors between lattice samples are
// interpolated and may not key out.
- (CIImage*) imageWithChromaColor:(NSColor*)chromaColor BackgroundColor:(NSColor*)backColor
{
    CIImage *im = self;
    // Solid background the keyed image will be composited over.
    CIColor *backCIColor = [[CIColor alloc] initWithColor:backColor];
    CIImage *backImage = [CIImage imageWithColor:backCIColor];
    backImage = [backImage imageByCroppingToRect:self.extent];
    [backCIColor release];
    float chroma[3];
    chroma[0] = chromaColor.redComponent;
    chroma[1] = chromaColor.greenComponent;
    chroma[2] = chromaColor.blueComponent;
    // Allocate the 64x64x64 RGBA float color cube.
    const unsigned int size = 64;
    const unsigned int cubeDataSize = size * size * size * sizeof (float) * 4;
    float *cubeData = (float *)malloc (cubeDataSize);
    float rgb[3];
    // Identity cube, except the chroma color maps to transparent
    // (premultiplied alpha: RGB scaled by alpha).
    size_t offset = 0;
    for (int z = 0; z < size; z++){
        rgb[2] = ((double)z)/(size-1); // Blue value
        for (int y = 0; y < size; y++){
            rgb[1] = ((double)y)/(size-1); // Green value
            for (int x = 0; x < size; x ++){
                rgb[0] = ((double)x)/(size-1); // Red value
                float alpha = ((rgb[0] == chroma[0]) && (rgb[1] == chroma[1]) && (rgb[2] == chroma[2])) ? 0.0 : 1.0;
                cubeData[offset] = rgb[0] * alpha;
                cubeData[offset+1] = rgb[1] * alpha;
                cubeData[offset+2] = rgb[2] * alpha;
                cubeData[offset+3] = alpha;
                offset += 4;
            }
        }
    }
    // Hand the cube buffer to NSData, which frees it when done.
    NSData *data = [NSData dataWithBytesNoCopy:cubeData
                                        length:cubeDataSize
                                  freeWhenDone:YES];
    CIFilter *colorCube = [CIFilter filterWithName:@"CIColorCube"];
    [colorCube setValue:[NSNumber numberWithInt:size] forKey:@"inputCubeDimension"];
    // Set data for cube
    [colorCube setValue:data forKey:@"inputCubeData"];
    [colorCube setValue:im forKey:@"inputImage"];
    im = [colorCube valueForKey:@"outputImage"];
    // Composite the keyed image over the solid background.
    CIFilter *sourceOver = [CIFilter filterWithName:@"CISourceOverCompositing"];
    [sourceOver setValue:im forKey:@"inputImage"];
    [sourceOver setValue:backImage forKey:@"inputBackgroundImage"];
    im = [sourceOver valueForKey:@"outputImage"];
    return im;
}
// Samples the pixel at (x, y) by rendering the receiver into a bitmap rep.
// Expensive: the whole image is rendered on every call, so batch lookups
// should render once and reuse the rep instead.
- (NSColor*)colorAtX:(NSUInteger)x y:(NSUInteger)y
{
    NSBitmapImageRep *rep = [[NSBitmapImageRep alloc] initWithCIImage:self];
    NSColor *pixelColor = [rep colorAtX:x y:y];
    [rep release];
    return pixelColor;
}

TTImageView problem

I want to display some TTImageView in a TTScrollView.
// TTScrollView data source: builds the page view for pageIndex, containing
// an asynchronously loading TTImageView.
- (UIView*)scrollView:(TTScrollView*)scrollView pageAtIndex:(NSInteger)pageIndex {
    // Fix: pageView was declared nil and immediately tested with
    // `if (!pageView)`, a guard that was always true — create it directly.
    TTView *pageView = [[[TTView alloc] init] autorelease];
    pageView.backgroundColor = [UIColor clearColor];
    pageView.userInteractionEnabled = NO;

    NSString *imagePath = [self.screenShotImgPath objectAtIndex:pageIndex];
    TTImageView *imageView = [[[TTImageView alloc] initWithFrame:CGRectZero] autorelease];
    imageView.autoresizesToImage = YES;
    imageView.urlPath = imagePath;
    imageView.backgroundColor = [UIColor clearColor];
    imageView.delegate = self;

    // NOTE(review): the image downloads asynchronously, so width/height are
    // still zero here until loading finishes; this rotation must be re-run
    // from the delegate's imageView:didLoadImage: callback to take effect.
    if (imageView.width > imageView.height){
        // Rotate landscape images 90 degrees counter-clockwise in place.
        // (Removed dead locals: `bnds` was computed via swapWidthAndHeight:
        // and never used.)
        CGSize size = imageView.size;
        CGAffineTransform tran = CGAffineTransformMakeTranslation(0.0, size.width);
        tran = CGAffineTransformRotate(tran, [self degreesToRadians:-90.0]);
        imageView.transform = tran;
    }

    // Fit the image view inside the scroll view with a 10pt vertical margin,
    // preserving the view's aspect ratio. (height is zero before the image
    // loads, which makes `scale` undefined — same caveat as above.)
    CGFloat scale = imageView.width/imageView.height;
    CGFloat scaledWidth = (scrollView.height - 20.f) * scale;
    imageView.frame = CGRectMake((scrollView.width - scaledWidth)/2, 10.f,
                                 scaledWidth, scrollView.height - 20.f);
    [pageView addSubview:imageView];
    return pageView;
}
but there is a problem: TTImageView downloads its image asynchronously, so it is quite possible that when execution reaches
if (imageView.width > imageView.height){
the image has not loaded yet, and an exception is thrown.
Since the image view is added in
- (UIView*)scrollView:(TTScrollView*)scrollView pageAtIndex:(NSInteger)pageIndex {
so I can't move the below rotate code out of this method
if (imageView.width > imageView.height){
CGAffineTransform tran = CGAffineTransformIdentity;
CGSize bnds = imageView.size;
CGSize bnds2 = imageView.size;
bnds = [self swapWidthAndHeight:bnds];
tran = CGAffineTransformMakeTranslation(0.0,bnds2.width);
tran = CGAffineTransformRotate(tran, [self degreesToRadians:-90.0]);
imageView.transform = tran;
}
What can I do to solve this problem?
You can adopt the <TTImageViewDelegate> protocol in your .h file
and add this method in the .m:
#pragma mark TTImageViewDelegate
// Called by TTImageView once its asynchronous image download completes.
-(void) imageView:(TTImageView *)imageView didLoadImage:(UIImage *)image{
    if (!image) {
        return;
    }
    //do your logic
}
This function will be triggered when the image is finished loading.