ios: change the colors of a UIImage - objective-c

I am working on an iPhone application.
I have PNG images that represent symbols. The symbols are all black with a transparent background.
Is there a way I can turn the black into another color? What I'm looking for is something that lets me choose a color, so that when I use a symbol (in a UIImage) I can make it appear in the color of my choice.
I have searched around and found a framework called OpenCV that can manipulate images, but I can't find out how to recolor a picture.
Any help and suggestions are greatly appreciated.
Thank you

The easiest and shortest way to do it, when you are dealing with a UIImageView:
Obj-C:
theImageView.image = [theImageView.image imageWithRenderingMode:UIImageRenderingModeAlwaysTemplate];
[theImageView setTintColor:[UIColor redColor]];
Swift (pre-Swift 3 syntax):
let theImageView = UIImageView(image: UIImage(named:"foo")!.imageWithRenderingMode(UIImageRenderingMode.AlwaysTemplate))
theImageView.tintColor = UIColor.redColor()

If you want to remove or replace one specific color, use the category below.
.h file:
#import <UIKit/UIKit.h>
@interface UIImage (Color)
+ (UIImage*)setBackgroundImageByColor:(UIColor *)backgroundColor withFrame:(CGRect )rect;
+ (UIImage*) replaceColor:(UIColor*)color inImage:(UIImage*)image withTolerance:(float)tolerance;
+(UIImage *)changeWhiteColorTransparent: (UIImage *)image;
+(UIImage *)changeColorTo:(NSMutableArray*) array Transparent: (UIImage *)image;
//resizing Stuff...
+ (UIImage *)imageWithImage:(UIImage *)image scaledToSize:(CGSize)newSize;
@end
.m file:
#import <QuartzCore/QuartzCore.h>
#import "UIImage+Color.h"
@implementation UIImage (Color)
+ (UIImage* )setBackgroundImageByColor:(UIColor *)backgroundColor withFrame:(CGRect )rect{
// tcv - temporary colored view
UIView *tcv = [[UIView alloc] initWithFrame:rect];
[tcv setBackgroundColor:backgroundColor];
// set up a graphics context of button's size
CGSize gcSize = tcv.frame.size;
UIGraphicsBeginImageContext(gcSize);
// add tcv's layer to context
[tcv.layer renderInContext:UIGraphicsGetCurrentContext()];
// create background image now
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return image;
// [tcv release];
}
+ (UIImage*) replaceColor:(UIColor*)color inImage:(UIImage*)image withTolerance:(float)tolerance {
CGImageRef imageRef = [image CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
NSUInteger bitmapByteCount = bytesPerRow * height;
unsigned char *rawData = (unsigned char*) calloc(bitmapByteCount, sizeof(unsigned char));
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGColorRef cgColor = [color CGColor];
const CGFloat *components = CGColorGetComponents(cgColor);
float r = components[0];
float g = components[1];
float b = components[2];
//float a = components[3]; // not needed
r = r * 255.0;
g = g * 255.0;
b = b * 255.0;
const float redRange[2] = {
MAX(r - (tolerance / 2.0), 0.0),
MIN(r + (tolerance / 2.0), 255.0)
};
const float greenRange[2] = {
MAX(g - (tolerance / 2.0), 0.0),
MIN(g + (tolerance / 2.0), 255.0)
};
const float blueRange[2] = {
MAX(b - (tolerance / 2.0), 0.0),
MIN(b + (tolerance / 2.0), 255.0)
};
int byteIndex = 0;
while (byteIndex < bitmapByteCount) {
unsigned char red = rawData[byteIndex];
unsigned char green = rawData[byteIndex + 1];
unsigned char blue = rawData[byteIndex + 2];
if (((red >= redRange[0]) && (red <= redRange[1])) &&
((green >= greenRange[0]) && (green <= greenRange[1])) &&
((blue >= blueRange[0]) && (blue <= blueRange[1]))) {
// make the pixel transparent
//
rawData[byteIndex] = 0;
rawData[byteIndex + 1] = 0;
rawData[byteIndex + 2] = 0;
rawData[byteIndex + 3] = 0;
}
byteIndex += 4;
}
CGImageRef resultImageRef = CGBitmapContextCreateImage(context);
UIImage *result = [UIImage imageWithCGImage:resultImageRef];
CGImageRelease(resultImageRef);
CGContextRelease(context);
free(rawData);
return result;
}
+(UIImage *)changeWhiteColorTransparent: (UIImage *)image
{
CGImageRef rawImageRef=image.CGImage;
const float colorMasking[6] = {222, 255, 222, 255, 222, 255};
UIGraphicsBeginImageContext(image.size);
CGImageRef maskedImageRef=CGImageCreateWithMaskingColors(rawImageRef, colorMasking);
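// Note: CGImageCreateWithMaskingColors returns NULL if the source image already has an alpha channel; flatten it to an opaque bitmap first if needed.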
{
// flip the context vertically so the image is not drawn upside down
CGContextTranslateCTM(UIGraphicsGetCurrentContext(), 0.0, image.size.height);
CGContextScaleCTM(UIGraphicsGetCurrentContext(), 1.0, -1.0);
}
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, image.size.width, image.size.height), maskedImageRef);
UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
CGImageRelease(maskedImageRef);
UIGraphicsEndImageContext();
return result;
}
+(UIImage *)changeColorTo:(NSMutableArray*) array Transparent: (UIImage *)image
{
CGImageRef rawImageRef=image.CGImage;
// const float colorMasking[6] = {222, 255, 222, 255, 222, 255};
const float colorMasking[6] = {[[array objectAtIndex:0] floatValue], [[array objectAtIndex:1] floatValue], [[array objectAtIndex:2] floatValue], [[array objectAtIndex:3] floatValue], [[array objectAtIndex:4] floatValue], [[array objectAtIndex:5] floatValue]};
UIGraphicsBeginImageContext(image.size);
CGImageRef maskedImageRef=CGImageCreateWithMaskingColors(rawImageRef, colorMasking);
{
// flip the context vertically so the image is not drawn upside down
CGContextTranslateCTM(UIGraphicsGetCurrentContext(), 0.0, image.size.height);
CGContextScaleCTM(UIGraphicsGetCurrentContext(), 1.0, -1.0);
}
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, image.size.width, image.size.height), maskedImageRef);
UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
CGImageRelease(maskedImageRef);
UIGraphicsEndImageContext();
return result;
}
+ (UIImage *)imageWithImage:(UIImage *)image scaledToSize:(CGSize)newSize {
//UIGraphicsBeginImageContext(newSize);
UIGraphicsBeginImageContextWithOptions(newSize, NO, 0.0);
[image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
@end
I used this code to change the white color to transparent.
The call will be:
self.rawImage.image=[UIImage changeWhiteColorTransparent:originalStateImage];
I hope this idea helps.
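For the replaceColor:inImage:withTolerance: variant, the call is similar; the tolerance of 50 here is just an arbitrary starting point (the range check works on 0-255 component values):
// make pixels near pure red transparent, with a tolerance of 50
UIImage *cleaned = [UIImage replaceColor:[UIColor redColor]
                                 inImage:originalStateImage
                           withTolerance:50.0f];
self.rawImage.image = cleaned;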

Use this category to change the entire color of the image.
.h file:
#import <UIKit/UIKit.h>
@interface UIImage (AddtionalFunctionalities)
//TintColor...
- (UIImage *)imageWithTint:(UIColor *)tintColor;
//scale and resize...
-(UIImage*)scaleToSize:(CGSize)size;
@end
.m file:
#import "UIImage+AddtionalFunctionalities.h"
@implementation UIImage (AddtionalFunctionalities)
- (UIImage *)imageWithTint:(UIColor *)tintColor
{
// Begin drawing
CGRect aRect = CGRectMake(0.f, 0.f, self.size.width, self.size.height);
CGImageRef alphaMask;
//
// Compute mask flipping image
//
{
UIGraphicsBeginImageContext(aRect.size);
CGContextRef c = UIGraphicsGetCurrentContext();
// draw image
CGContextTranslateCTM(c, 0, aRect.size.height);
CGContextScaleCTM(c, 1.0, -1.0);
[self drawInRect: aRect];
alphaMask = CGBitmapContextCreateImage(c);
UIGraphicsEndImageContext();
}
//
UIGraphicsBeginImageContext(aRect.size);
// Get the graphic context
CGContextRef c = UIGraphicsGetCurrentContext();
// Draw the image
[self drawInRect:aRect];
// Mask
CGContextClipToMask(c, aRect, alphaMask);
// Set the fill color space
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextSetFillColorSpace(c, colorSpace);
// Set the fill color
CGContextSetFillColorWithColor(c, tintColor.CGColor);
UIRectFillUsingBlendMode(aRect, kCGBlendModeNormal);
UIImage *img = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// Release memory
CGColorSpaceRelease(colorSpace);
CGImageRelease(alphaMask);
return img;
}
-(UIImage*)scaleToSize:(CGSize)size
{
// Create a bitmap graphics context
// This will also set it as the current context
UIGraphicsBeginImageContext(size);
// Draw the scaled image in the current context
[self drawInRect:CGRectMake(0, 0, size.width, size.height)];
// Create a new image from current context
UIImage* scaledImage = UIGraphicsGetImageFromCurrentImageContext();
// Pop the current context from the stack
UIGraphicsEndImageContext();
// Return our new scaled image
return scaledImage;
}
@end
The method call will be:
self.outputImage.image=[sourceImage imageWithTint:[UIColor redColor]];
If you want to tint with a pattern image instead, use:
self.outputImage.image = [sourceImage imageWithTint:[UIColor colorWithPatternImage:[UIImage imageNamed:@"red.jpg"]]];
I hope this helps.
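If you also need resizing, the two category methods chain naturally; a short sketch (the 80x60 size is just an example):
// scale the symbol first, then tint the scaled copy
UIImage *scaled = [sourceImage scaleToSize:CGSizeMake(80.0, 60.0)];
self.outputImage.image = [scaled imageWithTint:[UIColor blueColor]];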

Set the image's rendering mode to template, then use the tintColor property for the color:
yourImageView.image = [image imageWithRenderingMode:UIImageRenderingModeAlwaysTemplate];
[yourImageView setTintColor:[UIColor redColor]];

You can put the UIImageView inside a containing UIView and change that view's background color:
UIView *viewForImage = ....;
UIImageView *imageView = .....;
...
[viewForImage addSubview: imageView];
[self.view addSubview:viewForImage];
...
and then use for example
[viewForImage setBackgroundColor: [UIColor redColor]];
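A fleshed-out version of that sketch might look like this (the frame values and the "symbol" asset name are placeholders):
// container view whose background color shows through the symbol's transparent areas
UIView *viewForImage = [[UIView alloc] initWithFrame:CGRectMake(20, 20, 100, 100)];
UIImageView *imageView = [[UIImageView alloc] initWithFrame:viewForImage.bounds];
imageView.image = [UIImage imageNamed:@"symbol"];
[viewForImage addSubview:imageView];
[self.view addSubview:viewForImage];
[viewForImage setBackgroundColor:[UIColor redColor]];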

For manipulating images in Objective-C you will mainly use the Quartz 2D/Core Graphics framework, and I suspect it is the easiest way to accomplish your task.
The Quartz 2D programming guide is pretty comprehensive; have a look at the sections on filling a bitmap with color.
I also have some notes about heavier image processing on my blog, which might be helpful:
http://levonp.blogspot.de/2012/05/quartz-getting-diff-images-of-two.html
Of course you can also do it using more advanced tools like OpenGL, OpenCV, etc.

A very simple solution to change the color of the image. Idea: extensions for UIImage and UIImageView.
Swift 3, Xcode 8.1.
https://stackoverflow.com/a/40884483/4488252
Code sample from my answer (link above):
// use extension UIImage
newImageView = createNewImageView(x: 100)
newImageView.image = UIImage(named: "Apple")?.imageWithColor(newColor: UIColor.blue)
// use extension UIImageView
newImageView = createNewImageView(x: 160)
newImageView.image = UIImage(named: "Apple")
newImageView.imageColor = UIColor.green
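The linked answer is in Swift; for reference, a minimal Objective-C sketch of the same clip-to-mask idea, as a UIImage category method (the imageWithColor: name mirrors the extension and is otherwise an assumption):
- (UIImage *)imageWithColor:(UIColor *)newColor {
    CGRect rect = CGRectMake(0, 0, self.size.width, self.size.height);
    UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // flip the context so the CGImage mask is not applied upside down
    CGContextTranslateCTM(context, 0, self.size.height);
    CGContextScaleCTM(context, 1.0, -1.0);
    // use the image's own alpha channel as a clipping mask, then flood-fill with the new color
    CGContextClipToMask(context, rect, self.CGImage);
    [newColor setFill];
    CGContextFillRect(context, rect);
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}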

For iOS 13.0
Obj-C:
self.yourImageView.image = [self.yourImageView.image imageWithTintColor:[UIColor redColor]];
Swift:
yourImageView.image = yourImageView.image?.withTintColor(UIColor.red)
If the image comes from the Asset Catalog, you can change the rendering in the Attribute Inspector

Related

Detect tap on curved line?

I'm trying to draw a line with a specific width. I searched for examples online, but I only found examples using straight lines; I need curved lines. Also, I need to detect whether the user touched within the line. Is it possible to achieve this using Objective-C and SpriteKit? If so, can someone provide an example?
You can use UIBezierPath to create bezier curves (nice smooth curves). You can specify this path for a CAShapeLayer and add that as a sublayer to your view:
UIBezierPath *path = [UIBezierPath bezierPath];
[path moveToPoint:CGPointMake(10, 150)];
[path addCurveToPoint:CGPointMake(110, 150) controlPoint1:CGPointMake(40, 100) controlPoint2:CGPointMake(80, 100)];
[path addCurveToPoint:CGPointMake(210, 150) controlPoint1:CGPointMake(140, 200) controlPoint2:CGPointMake(170, 200)];
[path addCurveToPoint:CGPointMake(310, 150) controlPoint1:CGPointMake(250, 100) controlPoint2:CGPointMake(280, 100)];
CAShapeLayer *layer = [CAShapeLayer layer];
layer.lineWidth = 10;
layer.strokeColor = [UIColor redColor].CGColor;
layer.fillColor = [UIColor clearColor].CGColor;
layer.path = path.CGPath;
[self.view.layer addSublayer:layer];
If you want to randomize it a little, you can just randomize some of the curves. If you want some fuzziness, add some shadow. If you want the ends to be round, specify a rounded line cap:
UIBezierPath *path = [UIBezierPath bezierPath];
CGPoint point = CGPointMake(10, 100);
[path moveToPoint:point];
CGPoint controlPoint1;
CGPoint controlPoint2 = CGPointMake(point.x - 5.0 - arc4random_uniform(50), 150.0);
for (NSInteger i = 0; i < 5; i++) {
controlPoint1 = CGPointMake(point.x + (point.x - controlPoint2.x), 50.0);
point.x += 40.0 + arc4random_uniform(20);
controlPoint2 = CGPointMake(point.x - 5.0 - arc4random_uniform(50), 150.0);
[path addCurveToPoint:point controlPoint1:controlPoint1 controlPoint2:controlPoint2];
}
CAShapeLayer *layer = [CAShapeLayer layer];
layer.lineWidth = 5;
layer.strokeColor = [UIColor redColor].CGColor;
layer.fillColor = [UIColor clearColor].CGColor;
layer.path = path.CGPath;
layer.shadowColor = [UIColor redColor].CGColor;
layer.shadowRadius = 2.0;
layer.shadowOpacity = 1.0;
layer.shadowOffset = CGSizeZero;
layer.lineCap = kCALineCapRound;
[self.view.layer addSublayer:layer];
If you want it to be even more irregular, break those beziers into smaller segments, but the idea would be the same. The only trick with conjoined bezier curves is that you want to make sure that the second control point of one curve is in line with the first control point of the next one, or else you end up with sharp discontinuities in the curves.
If you want to detect if and when a user taps on it, that's more complicated. But what you have to do is:
Make a snapshot of the image:
- (UIImage *)captureView:(UIView *)view
{
UIGraphicsBeginImageContextWithOptions(view.bounds.size, NO, 1.0); // usually I'd use 0.0, but we'll use 1.0 here so that the tap point of the gesture matches the pixel of the snapshot
if ([view respondsToSelector:@selector(drawViewHierarchyInRect:afterScreenUpdates:)]) {
BOOL success = [view drawViewHierarchyInRect:view.bounds afterScreenUpdates:YES];
NSAssert(success, @"drawViewHierarchyInRect failed");
} else {
[view.layer renderInContext:UIGraphicsGetCurrentContext()];
}
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return image;
}
Get the color of the pixel at the coordinate the user tapped:
- (void)handleTap:(UITapGestureRecognizer *)gesture
{
CGPoint point = [gesture locationInView:gesture.view];
CGFloat red, green, blue, alpha;
UIColor *color = [self image:self.image colorAtPoint:point];
[color getRed:&red green:&green blue:&blue alpha:&alpha];
if (green < 0.9 && blue < 0.9 && red > 0.9)
NSLog(#"tapped on curve");
else
NSLog(#"didn't tap on curve");
}
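For completeness, the recognizer that drives handleTap: might be wired up like this (a sketch; storing the snapshot in self.image matches the handler above):
// attach a tap recognizer and capture the snapshot once the curves are drawn
UITapGestureRecognizer *tap = [[UITapGestureRecognizer alloc] initWithTarget:self
                                                                      action:@selector(handleTap:)];
[self.view addGestureRecognizer:tap];
self.image = [self captureView:self.view];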
Here is how I adapted Apple's code for getting the pixel buffer in order to determine the color of the pixel the user tapped on:
// adapted from https://developer.apple.com/library/mac/qa/qa1509/_index.html
- (UIColor *)image:(UIImage *)image colorAtPoint:(CGPoint)point
{
UIColor *color;
CGImageRef imageRef = image.CGImage;
// Create the bitmap context
CGContextRef context = [self createARGBBitmapContextForImage:imageRef];
NSAssert(context, #"error creating context");
// Get image width, height. We'll use the entire image.
size_t width = CGImageGetWidth(imageRef);
size_t height = CGImageGetHeight(imageRef);
CGRect rect = {{0,0},{width,height}};
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(context, rect, imageRef);
// Now we can get a pointer to the image data associated with the bitmap
// context.
uint8_t *data = CGBitmapContextGetData (context);
if (data != NULL) {
size_t offset = (NSInteger) point.y * 4 * width + (NSInteger) point.x * 4;
uint8_t alpha = data[offset];
uint8_t red = data[offset+1];
uint8_t green = data[offset+2];
uint8_t blue = data[offset+3];
color = [UIColor colorWithRed:red / 255.0 green:green / 255.0 blue:blue / 255.0 alpha:alpha / 255.0];
}
// When finished, release the context
CGContextRelease(context);
// Free image data memory for the context
if (data) {
free(data); // we used malloc in createARGBBitmapContextForImage, so free it
}
return color;
}
- (CGContextRef) createARGBBitmapContextForImage:(CGImageRef) inImage
{
CGContextRef context = NULL;
CGColorSpaceRef colorSpace;
void * bitmapData;
size_t bitmapByteCount;
size_t bitmapBytesPerRow;
// Get image width, height. We'll use the entire image.
size_t pixelsWide = CGImageGetWidth(inImage);
size_t pixelsHigh = CGImageGetHeight(inImage);
// Declare the number of bytes per row. Each pixel in the bitmap in this
// example is represented by 4 bytes; 8 bits each of red, green, blue, and
// alpha.
bitmapBytesPerRow = (pixelsWide * 4);
bitmapByteCount = (bitmapBytesPerRow * pixelsHigh);
// Use the generic RGB color space.
colorSpace = CGColorSpaceCreateDeviceRGB(); // CGColorSpaceCreateDeviceWithName(kCGColorSpaceGenericRGB);
NSAssert(colorSpace, #"Error allocating color space");
// Allocate memory for image data. This is the destination in memory
// where any drawing to the bitmap context will be rendered.
bitmapData = malloc(bitmapByteCount);
NSAssert(bitmapData, #"Unable to allocate bitmap buffer");
// Create the bitmap context. We want pre-multiplied ARGB, 8-bits
// per component. Regardless of what the source image format is
// (CMYK, Grayscale, and so on) it will be converted over to the format
// specified here by CGBitmapContextCreate.
context = CGBitmapContextCreate (bitmapData,
pixelsWide,
pixelsHigh,
8, // bits per component
bitmapBytesPerRow,
colorSpace,
(CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
NSAssert(context, #"Context not created!");
// Make sure and release colorspace before returning
CGColorSpaceRelease( colorSpace );
return context;
}
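As an aside, if you keep a reference to the path, you can skip the snapshot approach entirely and hit-test the stroked outline directly; a sketch using Core Graphics calls available since iOS 5:
// build a fattened outline of the stroke and test the tap point against it
- (BOOL)isPoint:(CGPoint)point onPath:(UIBezierPath *)path lineWidth:(CGFloat)lineWidth
{
    CGPathRef strokedPath = CGPathCreateCopyByStrokingPath(path.CGPath,
                                                           NULL,
                                                           lineWidth,
                                                           kCGLineCapRound,
                                                           kCGLineJoinRound,
                                                           10.0); // miter limit
    BOOL hit = CGPathContainsPoint(strokedPath, NULL, point, false);
    CGPathRelease(strokedPath);
    return hit;
}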

Can UIImage change the size and resolution of pictures?

If I pick a 640x960 picture and put it into a 200x300 UIImage, will that picture be uploaded to the server as a 200x300 picture?
You can use the following methods to resize your image:
+ (UIImage *) imageWithImage:(UIImage *)image scaledToSize:(CGSize)newSize {
//UIGraphicsBeginImageContext(newSize);
UIGraphicsBeginImageContextWithOptions(newSize, NO, 0.0);
[image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
+ (UIImage *) imageWithImage: (UIImage*) sourceImage scaledToWidth: (float) i_width { // method to scale image according to width
float oldWidth = sourceImage.size.width;
float scaleFactor = i_width / oldWidth;
float newHeight = sourceImage.size.height * scaleFactor;
float newWidth = oldWidth * scaleFactor;
UIGraphicsBeginImageContext(CGSizeMake(newWidth, newHeight));
[sourceImage drawInRect:CGRectMake(0, 0, newWidth, newHeight)];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
- (void)viewDidLoad
{
[super viewDidLoad];
// Create the image from a png file
imageOriginal = [UIImage imageNamed:@"bg-640by960.png"];
imageView =[[UIImageView alloc] initWithImage:imageOriginal];
imageView.tag=200;
// Get size of current image
size = [imageOriginal size];
// Frame location in view to show original image
[imageView setFrame:CGRectMake(0, 0, size.width, size.height)];
[[self view] addSubview:imageView];
[self.view bringSubviewToFront:btnCrop];
//To resize image call below method
[[self.view viewWithTag:200] removeFromSuperview];
[self squareImageWithImage:imageOriginal scaledToSize:CGSizeMake(200, 300)];
}
- (UIImage *)squareImageWithImage:(UIImage *)image scaledToSize:(CGSize)newSize
{
double ratio;
double delta;
CGPoint offset;
//make a new square size, that is the resized imaged width
CGSize sz = CGSizeMake(newSize.width, newSize.width);
//figure out if the picture is landscape or portrait, then
//calculate scale factor and offset
if (image.size.width > image.size.height) {
ratio = newSize.width / image.size.width;
delta = (ratio*image.size.width - ratio*image.size.height);
offset = CGPointMake(delta/2, 0);
} else {
ratio = newSize.width / image.size.height;
delta = (ratio*image.size.height - ratio*image.size.width);
offset = CGPointMake(0, delta/2);
}
//make the final clipping rect based on the calculated values
CGRect clipRect = CGRectMake(-offset.x, -offset.y,
(ratio * image.size.width) + delta,
(ratio * image.size.height) + delta);
//start a new context, with scale factor 0.0 so retina displays get
//high quality image
if ([[UIScreen mainScreen] respondsToSelector:@selector(scale)]) {
UIGraphicsBeginImageContextWithOptions(sz, YES, 0.0);
} else {
UIGraphicsBeginImageContext(sz);
}
UIRectClip(clipRect);
[image drawInRect:clipRect];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
imageView =
[[UIImageView alloc] initWithImage:newImage];
[imageView setFrame:CGRectMake(60, 60, newSize.width, newSize.height)];
[self.view addSubview:imageView];
return newImage;
}
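Either helper can be called directly; for instance, with the question's 200x300 target (assuming the class methods above live in the same view controller):
// exact 200x300 resize, ignoring aspect ratio
UIImage *resized = [[self class] imageWithImage:imageOriginal scaledToSize:CGSizeMake(200, 300)];
// proportional resize to a 200-point width
UIImage *scaledToWidth = [[self class] imageWithImage:imageOriginal scaledToWidth:200.0f];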

Simple resizing of UIImage in Xcode

Is there any way to resize a UIImage in as few lines as possible? I don't care about the aspect ratio; I just want to set the image resolution to 80x60. That's all.
This may be overkill, but you can simply take your image, create a graphics context at the resolution you want, draw into it, and then set tempImage on the UIImageView, overwriting the original:
UIImage *image = YourImageView.image;
UIImage *tempImage = nil;
CGSize targetSize = CGSizeMake(80,60);
UIGraphicsBeginImageContext(targetSize);
CGRect thumbnailRect = CGRectMake(0, 0, 0, 0);
thumbnailRect.origin = CGPointMake(0.0,0.0);
thumbnailRect.size.width = targetSize.width;
thumbnailRect.size.height = targetSize.height;
[image drawInRect:thumbnailRect];
tempImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
YourImageView.image = tempImage;
- (void)resizeImage:(UIImage *)image
{
CGSize origImageSize = [image size];
CGRect newRect = CGRectMake(0, 0, 80, 60);
float ratio = MAX(newRect.size.width / origImageSize.width,
newRect.size.height / origImageSize.height);
UIGraphicsBeginImageContextWithOptions(newRect.size, NO, 0.0);
UIBezierPath *path = [UIBezierPath bezierPathWithRoundedRect:newRect
cornerRadius:5.0];
[path addClip];
CGRect imageRect;
imageRect.size.width = ratio * origImageSize.width;
imageRect.size.height = ratio * origImageSize.height;
imageRect.origin.x = (newRect.size.width - imageRect.size.width) / 2.0;
imageRect.origin.y = (newRect.size.height - imageRect.size.height) / 2.0;
[image drawInRect:imageRect];
UIImage *smallImage = UIGraphicsGetImageFromCurrentImageContext();
NSData *data = UIImagePNGRepresentation(smallImage);
UIGraphicsEndImageContext();
}
Do not forget to add the CoreGraphics framework.
Use this class (add code to .h file accordingly)
#import "UIImage+Resize.h"
@implementation UIImage (Resize)
- (UIImage *)resizedImage:(CGSize)bounds {
return [self resizedImage:bounds upScale:YES];
}
- (UIImage*)resizedImage:(CGSize)bounds upScale:(BOOL)upScale {
CGSize originalSize = self.size;
float xScale = bounds.width / originalSize.width;
float yScale = bounds.height / originalSize.height;
float scale = MIN(xScale, yScale);
if (!upScale) {
scale = MIN(scale, 1);
}
CGSize newSize = CGSizeMake(originalSize.width * scale, originalSize.height * scale);
UIGraphicsBeginImageContextWithOptions(newSize, NO, [UIScreen mainScreen].scale);
[self drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
UIImage *resultImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return resultImage;
}
@end
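The call site then stays at one line (yourImageView is a placeholder name):
// fit within 80x60 preserving aspect ratio; upScale:NO avoids enlarging smaller images
yourImageView.image = [yourImageView.image resizedImage:CGSizeMake(80, 60) upScale:NO];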
I've found the following code on this page:
- (UIImage*)imageWithImage: (UIImage*) sourceImage scaledToWidth: (float) i_width{
if (sourceImage.size.width>sourceImage.size.height) {
sourceImage = [[UIImage alloc] initWithCGImage: sourceImage.CGImage
scale: 1.0
orientation: UIImageOrientationRight];
}
float oldWidth = sourceImage.size.width;
float scaleFactor = i_width / oldWidth;
float newHeight = sourceImage.size.height * scaleFactor;
float newWidth = oldWidth * scaleFactor;
UIGraphicsBeginImageContext(CGSizeMake(newWidth, newHeight));
[sourceImage drawInRect:CGRectMake(0, 0, newWidth, newHeight)];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
All you have to do is provide it with the i_width, and it will scale it accordingly.
I've added a little twist to it, so that if the picture is in landscape mode, it will be rotated to portrait, and then resized. If you want it to be the opposite (portrait to landscape), change this:
if (sourceImage.size.width>sourceImage.size.height) {
sourceImage = [[UIImage alloc] initWithCGImage: sourceImage.CGImage
scale: 1.0
orientation: UIImageOrientationRight];
}
to this:
if (sourceImage.size.height>sourceImage.size.width) {
sourceImage = [[UIImage alloc] initWithCGImage: sourceImage.CGImage
scale: 1.0
orientation: UIImageOrientationRight];
}
CAVEAT: my rotation method doesn't take into account whether the picture is pointing left or right. In other words, if an image is upside-down in landscape, my code can't recognise that. I hope someone else can shed some light on this, though :)
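One way to make the rotation independent of how the photo was taken is to normalize the orientation first: drawing a UIImage honors its imageOrientation, so redrawing it "bakes in" any EXIF rotation. A sketch under that assumption:
// redraw so the pixel data matches the display orientation
UIGraphicsBeginImageContextWithOptions(sourceImage.size, NO, sourceImage.scale);
[sourceImage drawInRect:CGRectMake(0, 0, sourceImage.size.width, sourceImage.size.height)];
UIImage *normalized = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// after this, normalized.imageOrientation is UIImageOrientationUp,
// so the width/height comparison behaves predictably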

drawRect drawing 'transparent' text?

I am looking to draw a UILabel (preferably through subclassing) as a transparent label, but with a solid background. I drew up a quick example (sorry, it's ugly, but it gets the point across :)).
Basically I have a UILabel and I would like the background to be a set colour, and the text should be transparent. I do not want to colour the text with the view's background; instead it should be 100% transparent, since I have a texture in the background that I want to line up inside and outside of the label.
I've spent the night browsing SO and searching Google, but I have found no helpful sources. I don't have much experience with Core Graphics drawing, so I would appreciate any links, help, tutorials, or sample code (maybe Apple has some I should look at?).
Thanks a bunch!
I've rewritten it as a UILabel subclass using barely any code and posted it on GitHub
The gist of it is you override drawRect but call [super drawRect:rect] to let the UILabel render as normal. Using a white label color lets you easily use the label itself as a mask.
- (void)drawRect:(CGRect)rect
{
CGContextRef context = UIGraphicsGetCurrentContext();
// let the superclass draw the label normally
[super drawRect:rect];
CGContextConcatCTM(context, CGAffineTransformMake(1, 0, 0, -1, 0, CGRectGetHeight(rect)));
// create a mask from the normally rendered text
CGImageRef image = CGBitmapContextCreateImage(context);
CGImageRef mask = CGImageMaskCreate(CGImageGetWidth(image), CGImageGetHeight(image), CGImageGetBitsPerComponent(image), CGImageGetBitsPerPixel(image), CGImageGetBytesPerRow(image), CGImageGetDataProvider(image), CGImageGetDecode(image), CGImageGetShouldInterpolate(image));
CFRelease(image); image = NULL;
// wipe the slate clean
CGContextClearRect(context, rect);
CGContextSaveGState(context);
CGContextClipToMask(context, rect, mask);
CFRelease(mask); mask = NULL;
[self RS_drawBackgroundInRect:rect];
CGContextRestoreGState(context);
}
Solved using CALayer masks. Creating a standard mask (wallpapered text, for example) is simple. To create the knocked-out text, I had to invert the alpha channel of my mask, which involved rendering a label to a CGImageRef and then doing some pixel-pushing.
Sample application is available here: https://github.com/robinsenior/RSMaskedLabel
Relevant code is here to avoid future link-rot:
#import "RSMaskedLabel.h"
#import <QuartzCore/QuartzCore.h>
@interface UIImage (RSAdditions)
+ (UIImage *) imageWithView:(UIView *)view;
- (UIImage *) invertAlpha;
@end
@interface RSMaskedLabel ()
{
CGImageRef invertedAlphaImage;
}
@property (nonatomic, retain) UILabel *knockoutLabel;
@property (nonatomic, retain) CALayer *textLayer;
- (void) RS_commonInit;
@end
@implementation RSMaskedLabel
@synthesize knockoutLabel, textLayer;
- (id)initWithFrame:(CGRect)frame
{
self = [super initWithFrame:frame];
if (self)
{
[self RS_commonInit];
}
return self;
}
- (id)initWithCoder:(NSCoder *)aDecoder
{
self = [super initWithCoder:aDecoder];
if (self)
{
[self RS_commonInit];
}
return self;
}
+ (Class)layerClass
{
return [CAGradientLayer class];
}
- (void) RS_commonInit
{
[self setBackgroundColor:[UIColor clearColor]];
// create the UILabel for the text
knockoutLabel = [[UILabel alloc] initWithFrame:[self frame]];
[knockoutLabel setText:#"booyah"];
[knockoutLabel setTextAlignment:UITextAlignmentCenter];
[knockoutLabel setFont:[UIFont boldSystemFontOfSize:72.0]];
[knockoutLabel setNumberOfLines:1];
[knockoutLabel setBackgroundColor:[UIColor clearColor]];
[knockoutLabel setTextColor:[UIColor whiteColor]];
// create our filled area (in this case a gradient)
NSArray *colors = [[NSArray arrayWithObjects:
(id)[[UIColor colorWithRed:0.349 green:0.365 blue:0.376 alpha:1.000] CGColor],
(id)[[UIColor colorWithRed:0.455 green:0.490 blue:0.518 alpha:1.000] CGColor],
(id)[[UIColor colorWithRed:0.412 green:0.427 blue:0.439 alpha:1.000] CGColor],
(id)[[UIColor colorWithRed:0.208 green:0.224 blue:0.235 alpha:1.000] CGColor],
nil] retain];
NSArray *gradientLocations = [NSArray arrayWithObjects:
[NSNumber numberWithFloat:0.0],
[NSNumber numberWithFloat:0.54],
[NSNumber numberWithFloat:0.55],
[NSNumber numberWithFloat:1], nil];
// render our label to a UIImage
// if you remove the call to invertAlpha it will mask the text
invertedAlphaImage = [[[UIImage imageWithView:knockoutLabel] invertAlpha] CGImage];
// create a new CALayer to use as the mask
textLayer = [CALayer layer];
// stick the image in the layer
[textLayer setContents:(id)invertedAlphaImage];
// create a nice gradient layer to use as our fill
CAGradientLayer *gradientLayer = (CAGradientLayer *)[self layer];
[gradientLayer setBackgroundColor:[[UIColor clearColor] CGColor]];
[gradientLayer setColors: colors];
[gradientLayer setLocations:gradientLocations];
[gradientLayer setStartPoint:CGPointMake(0.0, 0.0)];
[gradientLayer setEndPoint:CGPointMake(0.0, 1.0)];
[gradientLayer setCornerRadius:10];
// mask the text layer onto our gradient
[gradientLayer setMask:textLayer];
}
- (void)layoutSubviews
{
// resize the text layer
[textLayer setFrame:[self bounds]];
}
- (void)dealloc
{
CGImageRelease(invertedAlphaImage);
[knockoutLabel release];
[textLayer release];
[super dealloc];
}
@end
@implementation UIImage (RSAdditions)
/*
create a UIImage from a UIView
*/
+ (UIImage *) imageWithView:(UIView *)view
{
UIGraphicsBeginImageContextWithOptions(view.bounds.size, NO, 0.0);
[view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage * img = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return img;
}
/*
get the image to invert its alpha channel
*/
- (UIImage *)invertAlpha
{
// scale is needed for retina devices
CGFloat scale = [self scale];
CGSize size = self.size;
int width = size.width * scale;
int height = size.height * scale;
CGColorSpaceRef colourSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *memoryPool = (unsigned char *)calloc(width*height*4, 1);
CGContextRef context = CGBitmapContextCreate(memoryPool, width, height, 8, width * 4, colourSpace, kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colourSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
for(int y = 0; y < height; y++)
{
unsigned char *linePointer = &memoryPool[y * width * 4];
for(int x = 0; x < width; x++)
{
linePointer[3] = 255-linePointer[3];
linePointer += 4;
}
}
// get a CG image from the context, wrap it in a UIImage
CGImageRef cgImage = CGBitmapContextCreateImage(context);
UIImage *returnImage = [UIImage imageWithCGImage:cgImage scale:scale orientation:UIImageOrientationUp];
// clean up
CGImageRelease(cgImage);
CGContextRelease(context);
free(memoryPool);
// and return
return returnImage;
}
@end
Here's a technique that's similar to Matt Gallagher's, which will generate an inverted text mask with an image.
1. Allocate a (mutable) data buffer.
2. Create a bitmap context with an 8-bit alpha channel.
3. Configure settings for text drawing.
4. Fill the whole buffer in copy mode (default colour assumed to have an alpha value of 1).
5. Write the text in clear mode (alpha value of 0).
6. Create an image from the bitmap context.
7. Use the bitmap as a mask to make a new image from the source image.
8. Create a new UIImage and clean up.
Every time the textString or sourceImage or size values change, re-generate the final image.
CGSize size = /* assume this exists */;
UIImage *sourceImage = /* assume this exists */;
NSString *textString = /* assume this exists */;
const char *text = [textString cStringUsingEncoding:NSMacOSRomanStringEncoding];
NSUInteger len = [textString lengthOfBytesUsingEncoding:NSMacOSRomanStringEncoding];
NSMutableData *data = [NSMutableData dataWithLength:size.width*size.height*1];
CGContextRef context = CGBitmapContextCreate([data mutableBytes], size.width, size.height, 8, size.width, NULL, kCGImageAlphaOnly);
CGContextSelectFont(context, "Gill Sans Bold", 64.0f, kCGEncodingMacRoman);
CGContextSetTextDrawingMode(context, kCGTextFill);
CGContextSetBlendMode(context, kCGBlendModeCopy);
CGContextFillRect(context, CGRectMake(0.0f, 0.0f, size.width, size.height));
CGContextSetBlendMode(context, kCGBlendModeClear);
CGContextShowTextAtPoint(context, 16.0f, 16.0f, text, len);
CGImageRef textImage = CGBitmapContextCreateImage(context);
CGImageRef newImage = CGImageCreateWithMask(sourceImage.CGImage, textImage);
UIImage *finalImage = [UIImage imageWithCGImage:newImage];
CGContextRelease(context);
CFRelease(newImage);
CFRelease(textImage);
Another way to do this involves putting the textImage into a new layer and setting that layer on your view's layer. (Remove the lines that create "newImage" and "finalImage".) Assuming this happens inside your view's code somewhere:
CALayer *maskLayer = [[CALayer alloc] init];
CGPoint position = CGPointZero;
// layout the new layer
position = overlay.layer.position;
position.y *= 0.5f;
maskLayer.bounds = overlay.layer.bounds;
maskLayer.position = position;
maskLayer.contents = (__bridge id)textImage;
self.layer.mask = maskLayer;
There are more alternatives, some might be better (subclass UIImage and draw the text directly in clear mode after the superclass has done its drawing?).
Swift 5 solution (Xcode: 12.5):
class MaskedLabel: UILabel {
var maskColor : UIColor?
override init(frame: CGRect) {
super.init(frame: frame)
customInit()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
customInit()
}
func customInit() {
maskColor = self.backgroundColor
self.textColor = UIColor.white
backgroundColor = UIColor.clear
self.isOpaque = false
}
override func draw(_ rect: CGRect) {
let context = UIGraphicsGetCurrentContext()!
super.draw(rect)
context.concatenate(CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: rect.height))
let image: CGImage = context.makeImage()!
let mask: CGImage = CGImage(maskWidth: image.width, height: image.height, bitsPerComponent: image.bitsPerComponent, bitsPerPixel: image.bitsPerPixel, bytesPerRow: image.bytesPerRow, provider: image.dataProvider!, decode: image.decode, shouldInterpolate: image.shouldInterpolate)!
context.clear(rect)
context.saveGState()
context.clip(to: rect, mask: mask)
if (self.layer.cornerRadius != 0.0) {
context.addPath(CGPath(roundedRect: rect, cornerWidth: self.layer.cornerRadius, cornerHeight: self.layer.cornerRadius, transform: nil))
context.clip()
}
drawBackgroundInRect(rect: rect)
context.restoreGState()
}
func drawBackgroundInRect(rect: CGRect) {
let context = UIGraphicsGetCurrentContext()
if let _ = maskColor {
maskColor!.set()
}
context!.fill(rect)
}
}

kCAFilterNearest magnification filter (UIImageView)

I am using QREncoder library found here: https://github.com/jverkoey/ObjQREncoder
Basically, I looked at the example code by this author, and when he creates the QR code it comes out perfectly with no pixelation. The image the library provides is 33 x 33 pixels, but he uses kCAFilterNearest to magnify it and keep it crisp (no pixelation). Here is his code:
UIImage* image = [QREncoder encode:@"http://www.google.com/"];
UIImageView* imageView = [[UIImageView alloc] initWithImage:image];
CGFloat qrSize = self.view.bounds.size.width - kPadding * 2;
imageView.frame = CGRectMake(kPadding, (self.view.bounds.size.height - qrSize) / 2,
qrSize, qrSize);
[imageView layer].magnificationFilter = kCAFilterNearest;
[self.view addSubview:imageView];
I have a UIImageView in a xib, and I am setting its image like this:
[[template imageVQRCode] setImage:[QREncoder encode:ticketNum]];
[[[template imageVQRCode] layer] setMagnificationFilter:kCAFilterNearest];
but the QR code is really blurry. In the example, it comes out crystal clear.
What am I doing wrong?
Thanks!
UPDATE: I found out that the problem isn't with scaling or anything to do with kCAFilterNearest. It has to do with generating the PNG image from the view. Here's how it looks on the device vs. how it looks when I save the UIView to its PNG representation (notice the QR code's quality):
UPDATE 2: This is how I am generating the PNG file from UIView:
UIGraphicsBeginImageContextWithOptions([[template view] bounds].size, YES, 0.0);
[[[template view] layer] renderInContext:UIGraphicsGetCurrentContext()];
UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
[UIImagePNGRepresentation(viewImage) writeToFile:plistPath atomically:YES];
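One thing worth checking: renderInContext: rasterizes on the CPU using the destination context's interpolation quality and appears to ignore the layer's magnificationFilter. A sketch of the same snapshot with interpolation disabled (an untested assumption that this preserves the hard pixel edges):
UIGraphicsBeginImageContextWithOptions([[template view] bounds].size, YES, 0.0);
CGContextRef ctx = UIGraphicsGetCurrentContext();
// ask Core Graphics for nearest-neighbour scaling while rasterizing the layer tree
CGContextSetInterpolationQuality(ctx, kCGInterpolationNone);
[[[template view] layer] renderInContext:ctx];
UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();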
I have used the function below for editing the image.
- (UIImage *)resizedImage:(CGSize)newSize interpolationQuality:(CGInterpolationQuality)quality
{
BOOL drawTransposed;
switch (self.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
drawTransposed = YES;
break;
default:
drawTransposed = NO;
}
return [self resizedImage:newSize
transform:[self transformForOrientation:newSize]
drawTransposed:drawTransposed
interpolationQuality:quality];
}
- (UIImage *)resizedImage:(CGSize)newSize
transform:(CGAffineTransform)transform
drawTransposed:(BOOL)transpose
interpolationQuality:(CGInterpolationQuality)quality
{
CGRect newRect = CGRectIntegral(CGRectMake(0, 0, newSize.width, newSize.height));
CGRect transposedRect = CGRectMake(0, 0, newRect.size.height, newRect.size.width);
CGImageRef imageRef = self.CGImage;
// Build a context that's the same dimensions as the new size
CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(imageRef);
// compare only the alpha-info bits; CGBitmapInfo also carries byte-order flags
CGImageAlphaInfo alphaInfo = (CGImageAlphaInfo)(bitmapInfo & kCGBitmapAlphaInfoMask);
if ((alphaInfo == kCGImageAlphaLast) || (alphaInfo == kCGImageAlphaNone))
bitmapInfo = kCGImageAlphaNoneSkipLast;
CGContextRef bitmap = CGBitmapContextCreate(NULL,
newRect.size.width,
newRect.size.height,
CGImageGetBitsPerComponent(imageRef),
0,
CGImageGetColorSpace(imageRef),
bitmapInfo);
// Rotate and/or flip the image if required by its orientation
CGContextConcatCTM(bitmap, transform);
// Set the quality level to use when rescaling
CGContextSetInterpolationQuality(bitmap, quality);
// Draw into the context; this scales the image
CGContextDrawImage(bitmap, transpose ? transposedRect : newRect, imageRef);
// Get the resized image from the context and a UIImage
CGImageRef newImageRef = CGBitmapContextCreateImage(bitmap);
UIImage *newImage = [UIImage imageWithCGImage:newImageRef];
// Clean up
CGContextRelease(bitmap);
CGImageRelease(newImageRef);
return newImage;
}
UIImageWriteToSavedPhotosAlbum([image resizedImage:CGSizeMake(300, 300) interpolationQuality:kCGInterpolationNone], nil, nil, nil);
Let me know if you need any help.