I want to get all the points (pixels) in my UIImageView, so I can change "some" points' colors without needing UITouch. Is it possible?
What I am thinking of is:
get all points in the UIImageView
get the color of each point
if the previous color equals some specific color, change it
I googled this a lot, but all the tutorials I found depend on UITouch, like this one: http://www.markj.net/iphone-uiimage-pixel-color/
My main goal now is: how do I get all the points?
Any help is appreciated.
I found a solution; hope it helps someone one day.
This method returns an array of the pixel colors of an image:
- (NSArray *)getRGBAsFromImage:(UIImage *)image atX:(int)x andY:(int)y count:(int)count
{
    NSMutableArray *result = [NSMutableArray arrayWithCapacity:count];

    // First get the image into your data buffer
    CGImageRef imageRef = [image CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = (unsigned char *)calloc(height * width * 4, sizeof(unsigned char));
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);

    // Now rawData contains the image data in the RGBA8888 pixel format.
    NSUInteger byteIndex = (bytesPerRow * y) + x * bytesPerPixel;
    for (int i = 0; i < count; ++i)
    {
        CGFloat red   = (rawData[byteIndex]     * 1.0) / 255.0;
        CGFloat green = (rawData[byteIndex + 1] * 1.0) / 255.0;
        CGFloat blue  = (rawData[byteIndex + 2] * 1.0) / 255.0;
        CGFloat alpha = (rawData[byteIndex + 3] * 1.0) / 255.0;
        byteIndex += bytesPerPixel;

        UIColor *acolor = [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
        [result addObject:acolor];
    }

    free(rawData);
    return result;
}
You can call it like this:
NSArray *arrayOfPixels = [self getRGBAsFromImage:_patternFirstImage.image atX:_patternFirstImage.frame.origin.x andY:_patternFirstImage.frame.origin.y count:_patternFirstImage.frame.size.width * _patternFirstImage.frame.size.height];
NSLog(@"arrayOfPixels = %lu", (unsigned long)[arrayOfPixels count]);
You can loop through all the pixels and check each color like this:
for (int i = 0; i < [arrayOfPixels count]; i++) {
    NSLog(@"objects = ---%@----", [arrayOfPixels objectAtIndex:i]);
    if ([self color:[arrayOfPixels objectAtIndex:i] isEqualToColor:UIColorFromRGB(0xE0E0E0) withTolerance:0.2]) {
        NSLog(@"index = %d", i);
    }
}
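The snippet above relies on a UIColorFromRGB macro and a color:isEqualToColor:withTolerance: helper that the post doesn't show; one common definition of each (a sketch, not the original poster's code) is:
// Hypothetical helpers assumed by the loop above.
#define UIColorFromRGB(rgbValue) \
    [UIColor colorWithRed:((float)((rgbValue & 0xFF0000) >> 16)) / 255.0 \
                    green:((float)((rgbValue & 0x00FF00) >> 8)) / 255.0 \
                     blue:((float)(rgbValue & 0x0000FF)) / 255.0 \
                    alpha:1.0]

- (BOOL)color:(UIColor *)color1 isEqualToColor:(UIColor *)color2 withTolerance:(CGFloat)tolerance {
    CGFloat r1, g1, b1, a1, r2, g2, b2, a2;
    [color1 getRed:&r1 green:&g1 blue:&b1 alpha:&a1];
    [color2 getRed:&r2 green:&g2 blue:&b2 alpha:&a2];
    // Two colors match if every component is within the tolerance.
    return fabs(r1 - r2) <= tolerance &&
           fabs(g1 - g2) <= tolerance &&
           fabs(b1 - b2) <= tolerance &&
           fabs(a1 - a2) <= tolerance;
}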
Thanks a lot to the answer here:
https://stackoverflow.com/a/1262893/2168496
Related
I am working on document edge detection using OpenCV in my iOS project and have successfully detected the edges of the document.
Now I want to rotate the image along with the detected rectangle. I referred to this
GitHub project to detect the edges.
For that, I first rotated the image and tried to re-detect the edges by again finding the largest rectangle in the image. Unfortunately, it is not giving me the exact rectangle.
Can somebody suggest how to detect the rotated document's edges again, or should I rotate the detected rectangle along with the image?
Before Rotation Image
After Rotation Image
+ (NSMutableArray *)getLargestSquarePoints:(UIImage *)image :(CGSize)size {
    cv::Mat imageMat;

    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    imageMat = cvMat;
    cv::resize(imageMat, imageMat, cvSize(size.width, size.height));
    // UIImageToMat(image, imageMat);

    std::vector<std::vector<cv::Point> > rectangle;
    std::vector<cv::Point> largestRectangle;

    getRectangles(imageMat, rectangle);
    getlargestRectangle(rectangle, largestRectangle);

    if (largestRectangle.size() == 4)
    {
        // Thanks to: https://stackoverflow.com/questions/20395547/sorting-an-array-of-x-and-y-vertice-points-ios-objective-c/20399468#20399468
        NSArray *points = @[
            [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[0].x, (CGFloat)largestRectangle[0].y}],
            [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[1].x, (CGFloat)largestRectangle[1].y}],
            [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[2].x, (CGFloat)largestRectangle[2].y}],
            [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[3].x, (CGFloat)largestRectangle[3].y}] ];

        CGPoint min = [points[0] CGPointValue];
        CGPoint max = min;
        for (NSValue *value in points) {
            CGPoint point = [value CGPointValue];
            min.x = fminf(point.x, min.x);
            min.y = fminf(point.y, min.y);
            max.x = fmaxf(point.x, max.x);
            max.y = fmaxf(point.y, max.y);
        }

        CGPoint center = {
            0.5f * (min.x + max.x),
            0.5f * (min.y + max.y),
        };
        NSLog(@"center: %@", NSStringFromCGPoint(center));

        NSNumber *(^angleFromPoint)(id) = ^(NSValue *value) {
            CGPoint point = [value CGPointValue];
            CGFloat theta = atan2f(point.y - center.y, point.x - center.x);
            CGFloat angle = fmodf(M_PI - M_PI_4 + theta, 2 * M_PI);
            return @(angle);
        };

        NSArray *sortedPoints = [points sortedArrayUsingComparator:^NSComparisonResult(id a, id b) {
            return [angleFromPoint(a) compare:angleFromPoint(b)];
        }];
        NSLog(@"sorted points: %@", sortedPoints);

        NSMutableArray *squarePoints = [[NSMutableArray alloc] init];
        [squarePoints addObject:[sortedPoints objectAtIndex:0]];
        [squarePoints addObject:[sortedPoints objectAtIndex:1]];
        [squarePoints addObject:[sortedPoints objectAtIndex:2]];
        [squarePoints addObject:[sortedPoints objectAtIndex:3]];
        imageMat.release();
        return squarePoints;
    }
    else {
        imageMat.release();
        return nil;
    }
}
void getRectangles(cv::Mat& image, std::vector<std::vector<cv::Point>>& rectangles) {
    // blur will enhance edge detection
    cv::Mat blurred(image);
    GaussianBlur(image, blurred, cvSize(11, 11), 0);

    cv::Mat gray0(blurred.size(), CV_8U), gray;
    std::vector<std::vector<cv::Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);
                // Canny(gray0, gray, 0, 50, 5);

                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, cv::Mat(), cv::Point(-1, -1));
            }
            else
            {
                gray = gray0 >= (l + 1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            std::vector<cv::Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true) * 0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(cv::Mat(approx))) > 1000 &&
                    isContourConvex(cv::Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        rectangles.push_back(approx);
                }
            }
        }
    }
}
void getlargestRectangle(const std::vector<std::vector<cv::Point> >& rectangles, std::vector<cv::Point>& largestRectangle)
{
    if (!rectangles.size())
    {
        return;
    }

    double maxArea = 0;
    int index = 0;
    for (size_t i = 0; i < rectangles.size(); i++)
    {
        cv::Rect rectangle = boundingRect(cv::Mat(rectangles[i]));
        double area = rectangle.width * rectangle.height;
        if (maxArea < area)
        {
            maxArea = area;
            index = i;
        }
    }
    largestRectangle = rectangles[index];
}
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10);
}
+ (UIImage *)getTransformedImage:(CGFloat)newWidth :(CGFloat)newHeight :(UIImage *)origImage :(CGPoint[4])corners :(CGSize)size {
    cv::Mat imageMat;

    CGColorSpaceRef colorSpace = CGImageGetColorSpace(origImage.CGImage);
    CGFloat cols = size.width;
    CGFloat rows = size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,    // pointer to backing data
                                                    cols,          // width of bitmap
                                                    rows,          // height of bitmap
                                                    8,             // bits per component
                                                    cvMat.step[0], // bytes per row
                                                    colorSpace,    // colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // bitmap info flags
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), origImage.CGImage);
    CGContextRelease(contextRef);

    imageMat = cvMat;

    cv::Mat newImageMat = cv::Mat(cvSize(newWidth, newHeight), CV_8UC4);

    cv::Point2f src[4], dst[4];
    src[0].x = corners[0].x;
    src[0].y = corners[0].y;
    src[1].x = corners[1].x;
    src[1].y = corners[1].y;
    src[2].x = corners[2].x;
    src[2].y = corners[2].y;
    src[3].x = corners[3].x;
    src[3].y = corners[3].y;

    dst[0].x = 0;
    dst[0].y = 0;
    dst[1].x = newWidth - 1;
    dst[1].y = 0;
    dst[2].x = newWidth - 1;
    dst[2].y = newHeight - 1;
    dst[3].x = 0;
    dst[3].y = newHeight - 1;

    cv::warpPerspective(imageMat, newImageMat, cv::getPerspectiveTransform(src, dst), cvSize(newWidth, newHeight));

    // Transform to UIImage
    NSData *data = [NSData dataWithBytes:newImageMat.data length:newImageMat.elemSize() * newImageMat.total()];

    CGColorSpaceRef colorSpace2;
    CGBitmapInfo bitmapInfo;
    if (newImageMat.elemSize() == 1) {
        colorSpace2 = CGColorSpaceCreateDeviceGray();
        bitmapInfo = kCGImageAlphaNone | kCGBitmapByteOrderDefault;
    } else {
        // The mat is 4-channel here, so use an RGB colorspace and skip the unused alpha byte.
        colorSpace2 = CGColorSpaceCreateDeviceRGB();
        bitmapInfo = kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault;
    }

    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGFloat width = newImageMat.cols;
    CGFloat height = newImageMat.rows;

    CGImageRef imageRef = CGImageCreate(width, height, 8, 8 * newImageMat.elemSize(),
                                        newImageMat.step[0],
                                        colorSpace2,
                                        bitmapInfo, provider,
                                        NULL, false, kCGRenderingIntentDefault);
    UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace2);
    return image;
}
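For context, a sketch of how these two class methods might be chained together (the OpenCVWrapper class name, the photo variable, and the output dimensions are made up for illustration):
// Hypothetical usage: detect the document's corners, then deskew it.
NSMutableArray *corners = [OpenCVWrapper getLargestSquarePoints:photo :photo.size];
if (corners != nil) {
    CGPoint pts[4];
    for (int i = 0; i < 4; i++) {
        pts[i] = [corners[i] CGPointValue];
    }
    // 600x800 is an arbitrary output size for the flattened document.
    UIImage *flattened = [OpenCVWrapper getTransformedImage:600 :800 :photo :pts :photo.size];
}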
If you use cv::minAreaRect (cv2.minAreaRect in Python), it gives you the best enclosing rotated rectangle for a contour together with its angle in degrees, so you can rotate the image back.
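A sketch of that idea, assuming contour holds the detected document contour and imageMat the source cv::Mat (the angle normalization below is a common heuristic, not part of the original answer):
// Get the minimal rotated rectangle that encloses the contour.
cv::RotatedRect box = cv::minAreaRect(contour);
double angle = box.angle; // degrees; in (-90, 0] for older OpenCV versions
if (box.size.width < box.size.height) {
    angle += 90.0; // normalize so 0 means an upright document
}

// Rotate the image back around the rectangle's center to deskew it.
cv::Mat rotation = cv::getRotationMatrix2D(box.center, angle, 1.0);
cv::Mat deskewed;
cv::warpAffine(imageMat, deskewed, rotation, imageMat.size());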
I want to change the color of my image but don't want to change the alpha of the image.
I am using the following code to shift the color toward blue.
But I want to set all the pixels in the array to one particular RGB value.
For example, I have to apply the RGB value R=116, G=170, B=243.
CGImageRef sourceImage = ImageView_Test.image.CGImage;

// Copy the pixel data into a mutable buffer we own before modifying it.
CFDataRef theData = CGDataProviderCopyData(CGImageGetDataProvider(sourceImage));
CFMutableDataRef mutableData = CFDataCreateMutableCopy(kCFAllocatorDefault, 0, theData);
CFRelease(theData);
UInt8 *pixelData = CFDataGetMutableBytePtr(mutableData);

int red = 0;
int green = 1;
int blue = 2;
CFIndex dataLength = CFDataGetLength(mutableData);

for (CFIndex index = 0; index < dataLength; index += 4)
{
    if (pixelData[index + blue] - 80 > 0)
    {
        pixelData[index + red] = pixelData[index + blue] - 139;
        pixelData[index + green] = pixelData[index + blue] - 85;
    }
    else
    {
        pixelData[index + green] = 0;
        pixelData[index + red] = 0;
    }
}

CGContextRef context;
context = CGBitmapContextCreate(pixelData,
                                CGImageGetWidth(sourceImage),
                                CGImageGetHeight(sourceImage),
                                8,
                                CGImageGetBytesPerRow(sourceImage),
                                CGImageGetColorSpace(sourceImage),
                                kCGImageAlphaPremultipliedLast);

CGImageRef newCGImage = CGBitmapContextCreateImage(context);
UIImage *newImage = [UIImage imageWithCGImage:newCGImage];
ImageView_Test.image = newImage;

CGContextRelease(context);
CFRelease(mutableData);
CGImageRelease(newCGImage);
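For the particular RGB value mentioned above (R=116, G=170, B=243), a minimal variant of the same loop, leaving the alpha byte untouched, might be:
// Set every visible pixel to the target color; the alpha channel stays as-is.
for (CFIndex index = 0; index < dataLength; index += 4)
{
    if (pixelData[index + 3] > 0) // skip fully transparent pixels
    {
        pixelData[index + red]   = 116;
        pixelData[index + green] = 170;
        pixelData[index + blue]  = 243;
    }
}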
I am using the following method to change the color of a UIImage without affecting its alpha:
- (UIImage *)didImageColorchanged:(NSString *)name withColor:(UIColor *)color
{
    UIImage *img = [UIImage imageNamed:name];
    UIGraphicsBeginImageContext(img.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    [color setFill];

    CGContextTranslateCTM(context, 0, img.size.height);
    CGContextScaleCTM(context, 1.0, -1.0);

    CGContextSetBlendMode(context, kCGBlendModeColorBurn);
    CGRect rect = CGRectMake(0, 0, img.size.width, img.size.height);
    CGContextDrawImage(context, rect, img.CGImage);

    CGContextClipToMask(context, rect, img.CGImage);
    CGContextAddRect(context, rect);
    CGContextDrawPath(context, kCGPathFill);

    UIImage *coloredImg = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return coloredImg;
}
Ex (note the method takes the image name, not a UIImage):
resultView.image = [self didImageColorchanged:@"xyz.png" withColor:[UIColor redColor]];
You can just use:
// load image
UIImage *image = [UIImage imageNamed:@"test.png"];
CGImageRef imageRef = image.CGImage;

// copy the pixel data into a mutable buffer we own
CFDataRef copiedData = CGDataProviderCopyData(CGImageGetDataProvider(imageRef));
CFMutableDataRef data = CFDataCreateMutableCopy(kCFAllocatorDefault, 0, copiedData);
CFRelease(copiedData);
UInt8 *pixels = CFDataGetMutableBytePtr(data);
CFIndex length = CFDataGetLength(data);

// this is where you manipulate the individual pixels
// assumes a 4 byte pixel consisting of rgb and alpha
// for PNGs without transparency use i += 3 and drop the alpha byte
for (CFIndex i = 0; i < length; i += 4)
{
    CFIndex r = i;
    CFIndex g = i + 1;
    CFIndex b = i + 2;
    CFIndex a = i + 3;

    pixels[r] = 0;         // eg. remove red
    pixels[g] = pixels[g]; // green, blue and alpha stay as they are
    pixels[b] = pixels[b];
    pixels[a] = pixels[a];
}

// create a new image from the modified pixel data
size_t width = CGImageGetWidth(imageRef);
size_t height = CGImageGetHeight(imageRef);
size_t bitsPerComponent = CGImageGetBitsPerComponent(imageRef);
size_t bitsPerPixel = CGImageGetBitsPerPixel(imageRef);
size_t bytesPerRow = CGImageGetBytesPerRow(imageRef);

CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(imageRef);
CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);

CGImageRef newImageRef = CGImageCreate(
    width,
    height,
    bitsPerComponent,
    bitsPerPixel,
    bytesPerRow,
    colorspace,
    bitmapInfo,
    provider,
    NULL,
    false,
    kCGRenderingIntentDefault
);

// the modified image
UIImage *newImage = [UIImage imageWithCGImage:newImageRef];

// cleanup -- do not free(pixels) or release imageRef: the pixel buffer is
// owned by `data`, and imageRef is owned by `image`
CGColorSpaceRelease(colorspace);
CGDataProviderRelease(provider);
CGImageRelease(newImageRef);
CFRelease(data);
I am making a military strategy game. When an army in my game takes over a territory, I want to change the color of that territory on the map so it shows the new controller. Here is the code I have that changes a territory's image.
For example, I would write this line:
UIImage *newImg = [self imageOfTerritoryWithNewArmy:@"japan" AndOldTerritoryImage:[UIImage imageNamed:@"ireland.gif"]];
Here is the rest of the code:
// Release callback so the pixel buffer is freed once the image no longer needs it.
static void releaseRGBData(void *info, const void *data, size_t size) {
    free((void *)data);
}

- (UIImage *)createImageWithRGB:(NSArray *)colorData width:(NSInteger)width height:(NSInteger)height {
    unsigned char *rawData = malloc(width * height * 4);
    for (int i = 0; i < width * height; ++i)
    {
        CGFloat red;
        CGFloat green;
        CGFloat blue;
        CGFloat alpha;
        UIColor *color = colorData[i];
        if ([color respondsToSelector:@selector(getRed:green:blue:alpha:)]) {
            [color getRed:&red green:&green blue:&blue alpha:&alpha];
        } else {
            const CGFloat *components = CGColorGetComponents(color.CGColor);
            red = components[0];
            green = components[1];
            blue = components[2];
            alpha = components[3];
        }
        if (alpha > 0) {
            rawData[4*i]   = red * 255;
            rawData[4*i+1] = green * 255;
            rawData[4*i+2] = blue * 255;
            rawData[4*i+3] = alpha * 255;
        } else {
            rawData[4*i]   = 255;
            rawData[4*i+1] = 255;
            rawData[4*i+2] = 255;
            rawData[4*i+3] = 0;
        }
    }

    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,
                                                              rawData,
                                                              width * height * 4,
                                                              releaseRGBData);
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    CGImageRef imageRef = CGImageCreate(width,
                                        height,
                                        8,
                                        32,
                                        4 * width, colorSpaceRef,
                                        bitmapInfo,
                                        provider, NULL, NO, renderingIntent);
    UIImage *newImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGColorSpaceRelease(colorSpaceRef);
    CGDataProviderRelease(provider);
    return newImage;
}
- (NSArray *)getRGBAsFromImage:(UIImage *)image atX:(int)xx andY:(int)yy count:(int)count {
    NSMutableArray *result = [NSMutableArray arrayWithCapacity:count];

    // First get the image into your data buffer
    CGImageRef imageRef = [image CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = (unsigned char *)calloc(height * width * 4, sizeof(unsigned char));
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);

    // Now rawData contains the image data in the RGBA8888 pixel format.
    int byteIndex = (bytesPerRow * yy) + xx * bytesPerPixel;
    for (int ii = 0; ii < count; ++ii)
    {
        CGFloat red   = (rawData[byteIndex]     * 1.0) / 255.0;
        CGFloat green = (rawData[byteIndex + 1] * 1.0) / 255.0;
        CGFloat blue  = (rawData[byteIndex + 2] * 1.0) / 255.0;
        CGFloat alpha = (rawData[byteIndex + 3] * 1.0) / 255.0;
        byteIndex += 4;

        UIColor *acolor = [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
        [result addObject:acolor];
    }

    free(rawData);
    return result;
}
- (UIColor *)colorOfArmy:(NSString *)army {
    UIColor *color;
    army = [army stringByReplacingOccurrencesOfString:@"\a" withString:@""];
    if ([army isEqual:@"france"]) {
        color = [[UIColor alloc] initWithRed:0.3137254 green:0.3686274 blue:0.9058823 alpha:1];
    }
    if ([army isEqual:@"germany"]) {
        color = [[UIColor alloc] initWithRed:0.6352941 green:0.4313725 blue:0.3372549 alpha:1];
    }
    if ([army isEqual:@"uk"]) {
        color = [[UIColor alloc] initWithRed:0.8941176 green:0.4235294 blue:0.4941176 alpha:1];
    }
    if ([army isEqual:@"italy"]) {
        color = [[UIColor alloc] initWithRed:0.5137254 green:0.1215686 blue:0.4745098 alpha:1];
    }
    if ([army isEqual:@"ussr"]) {
        color = [[UIColor alloc] initWithRed:0.3607843 green:0.0823529 blue:0.1215686 alpha:1];
    }
    if ([army isEqual:@"japan"]) {
        color = [[UIColor alloc] initWithRed:0.9215686 green:0.6156862 blue:0.3137254 alpha:1];
    }
    if ([army isEqual:@""]) {
        color = [[UIColor alloc] initWithRed:0.1215686 green:0.2823529 blue:0.1607843 alpha:1];
    }
    if (color == nil) {
        NSLog(@"the problem was %@", army);
    }
    return color;
}
- (UIImage *)imageOfTerritoryWithNewArmy:(NSString *)army AndOldTerritoryImage:(UIImage *)oldImg {
    CGImageRef image = oldImg.CGImage;
    NSInteger width = CGImageGetWidth(image);
    NSInteger height = CGImageGetHeight(image);
    NSArray *rgba = [self getRGBAsFromImage:oldImg atX:0 andY:0 count:width * height];
    NSMutableArray *fixedRGBA = [[NSMutableArray alloc] init];
    UIColor *armyColor = [self colorOfArmy:army];
    for (UIColor *pixel in rgba) {
        CGFloat red;
        CGFloat green;
        CGFloat blue;
        CGFloat alpha;
        if ([pixel respondsToSelector:@selector(getRed:green:blue:alpha:)]) {
            [pixel getRed:&red green:&green blue:&blue alpha:&alpha];
        } else {
            const CGFloat *components = CGColorGetComponents(pixel.CGColor);
            red = components[0];
            green = components[1];
            blue = components[2];
            alpha = components[3];
        }
        red = red * 255;
        green = green * 255;
        blue = blue * 255;
        if (alpha > 0) {
            if (red < 50 && green < 50 && blue < 50) {
                [fixedRGBA addObject:pixel];
            } else {
                [fixedRGBA addObject:armyColor];
            }
        } else {
            [fixedRGBA addObject:pixel];
        }
    }
    return [self createImageWithRGB:fixedRGBA width:width height:height];
}
The problem that I am having is that when the image is drawn again, all of the pixels that used to be blank because they have an alpha value of 0 are displayed as white. How can I get these pixels to still be displayed as clear pixels?
When creating the image, you should specify that it contains an alpha channel. That is, instead of:
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
use:
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaLast;
See the CGImage interface specification for more info.
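Applied to the createImageWithRGB: method from the question, the call then becomes (same arguments, only bitmapInfo changed):
// Tell CGImageCreate the fourth byte of each pixel is (non-premultiplied) alpha.
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaLast;
CGImageRef imageRef = CGImageCreate(width,
                                    height,
                                    8,         // bits per component
                                    32,        // bits per pixel
                                    4 * width, // bytes per row
                                    colorSpaceRef,
                                    bitmapInfo,
                                    provider, NULL, NO, renderingIntent);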
I believe your else clause here is where you want to set the color to clear:
if (alpha > 0) {
    rawData[4*i]   = red * 255;
    rawData[4*i+1] = green * 255;
    rawData[4*i+2] = blue * 255;
    rawData[4*i+3] = alpha * 255;
}
else {
    rawData[4*i]   = 255;
    rawData[4*i+1] = 255;
    rawData[4*i+2] = 255;
    rawData[4*i+3] = 255;
}
You have to change the last assignment to 0:
if (alpha > 0) {
    rawData[4*i]   = red * 255;
    rawData[4*i+1] = green * 255;
    rawData[4*i+2] = blue * 255;
    rawData[4*i+3] = alpha * 255;
}
else {
    rawData[4*i]   = 255;
    rawData[4*i+1] = 255;
    rawData[4*i+2] = 255;
    rawData[4*i+3] = 0;
}
My problem is that the UIImage is rotated after processing.
I use a helper class for the image processing called ProcessHelper. This class has two methods:
+ (unsigned char *)convertUIImageToBitmapRGBA8:(UIImage *)image;
+ (UIImage *)convertBitmapRGBA8ToUIImage:(unsigned char *)rawData
                               withWidth:(int)width
                              withHeight:(int)height;
Implementation:
+ (unsigned char *)convertUIImageToBitmapRGBA8:(UIImage *)image {
    NSLog(@"Convert image [%d x %d] to RGBA8 char data", (int)image.size.width,
          (int)image.size.height);
    CGImageRef imageRef = [image CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    unsigned char *rawData = malloc(height * width * 4);
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;

    CGContextRef context = CGBitmapContextCreate(rawData,
                                                 width,
                                                 height,
                                                 bitsPerComponent,
                                                 bytesPerRow,
                                                 colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);
    return rawData;
}
+ (UIImage *)convertBitmapRGBA8ToUIImage:(unsigned char *)rawData
                               withWidth:(int)width
                              withHeight:(int)height {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(rawData,
                                             width,
                                             height,
                                             8,
                                             width * 4,
                                             colorSpace,
                                             kCGImageAlphaPremultipliedLast);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *rawImage = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGContextRelease(ctx);
    CGColorSpaceRelease(colorSpace);
    free(rawData);
    return rawImage;
}
On start I get pixel data:
rawData = [ProcessHelper convertUIImageToBitmapRGBA8:image];
Next I do some processing:
- (void)process_grayscale {
    int byteIndex = 0;
    for (int i = 0; i < workingImage.size.width * workingImage.size.height; ++i)
    {
        int outputColor = (rawData[byteIndex] + rawData[byteIndex+1] + rawData[byteIndex+2]) / 3;
        rawData[byteIndex] = rawData[byteIndex + 1] = rawData[byteIndex + 2] = (char)outputColor;
        byteIndex += 4;
    }

    workingImage = [ProcessHelper convertBitmapRGBA8ToUIImage:rawData
                                                    withWidth:CGImageGetWidth(workingImage.CGImage)
                                                   withHeight:CGImageGetHeight(workingImage.CGImage)];
}
After this I return workingImage to the parent class and the UIImageView displays it, but at the old size: the image is WxH before and still WxH after, just rotated (it should be HxW after the rotation). I would like the image not to rotate.
This happens when I edit photos taken on the iPad. Screenshots are fine, and images from the internet (like backgrounds) are fine.
How can I do this correctly?
Use UIGraphicsPushContext(context); [image drawInRect:CGRectMake(0, 0, width, height)]; UIGraphicsPopContext(); instead of CGContextDrawImage. CGContextDrawImage will flip the image vertically.
Or scale and transform the context before calling CGContextDrawImage.
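In convertUIImageToBitmapRGBA8: above, the draw step would then look something like this (a sketch; the translate/scale flips the CTM so UIKit's top-left-origin drawing lands right side up in the raw buffer):
// Let UIKit draw the image so its orientation metadata is honored.
CGContextTranslateCTM(context, 0, height);
CGContextScaleCTM(context, 1.0, -1.0);
UIGraphicsPushContext(context);
[image drawInRect:CGRectMake(0, 0, width, height)];
UIGraphicsPopContext();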
I load an image from the camera roll, but the image is upside down, so I wrote a method to rotate it:
CGImageRef imageRef = [image CGImage];
float width = CGImageGetWidth(imageRef);
float height = CGImageGetHeight(imageRef);

CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
Byte *rawData = malloc(height * width * 4);
Byte bytesPerPixel = 4;
int bytesPerRow = bytesPerPixel * width;
Byte bitsPerComponent = 8;

CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);

// Reverse the pixel order: last pixel first (a 180-degree rotation).
int byteIndex = 0;
Byte *rawData2 = malloc(height * width * 4);
int lastPixelIndex = ((int)(width * height) - 1) * 4; // start of the last pixel
for (int i = 0; i < width * height; i++) {
    rawData2[byteIndex + 0] = rawData[lastPixelIndex - byteIndex + 0];
    rawData2[byteIndex + 1] = rawData[lastPixelIndex - byteIndex + 1];
    rawData2[byteIndex + 2] = rawData[lastPixelIndex - byteIndex + 2];
    rawData2[byteIndex + 3] = rawData[lastPixelIndex - byteIndex + 3];
    byteIndex += 4;
}

CGContextRef ctx = CGBitmapContextCreate(rawData2, CGImageGetWidth(imageRef), CGImageGetHeight(imageRef), 8, CGImageGetBytesPerRow(imageRef), CGImageGetColorSpace(imageRef), kCGImageAlphaPremultipliedLast);
imageRef = CGBitmapContextCreateImage(ctx);
image = [UIImage imageWithCGImage:imageRef];

CGImageRelease(imageRef);
CGContextRelease(ctx);
CGContextRelease(context);
free(rawData);
// note: rawData2 backs the new image's bitmap, so it must not be freed here
return image;
That works, but now I need to flip the image horizontally, and I don't know how to do it; I have been trying for two days.
Thanks for any help.
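For a pixel-level horizontal mirror (reversing each row rather than the whole buffer), a sketch in the spirit of the loop above, reusing the same rawData buffer:
// Mirror each row: pixel x maps to pixel (width - 1 - x) in the same row y.
int w = (int)width;
int h = (int)height;
Byte *mirrored = malloc(h * w * 4);
for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
        int src = (y * w + x) * 4;
        int dst = (y * w + (w - 1 - x)) * 4;
        mirrored[dst + 0] = rawData[src + 0]; // red
        mirrored[dst + 1] = rawData[src + 1]; // green
        mirrored[dst + 2] = rawData[src + 2]; // blue
        mirrored[dst + 3] = rawData[src + 3]; // alpha
    }
}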
Have you tried this?
imageView.transform = CGAffineTransformMakeScale(-1, 1);
You can also do the rotation by using a transformation:
imageView.transform = CGAffineTransformMakeRotation(M_PI);
You can concatenate the two transformations into one like this:
imageView.transform = CGAffineTransformRotate(CGAffineTransformMakeScale(-1, 1), M_PI);
If you want to produce your own UIImage object, rather than manipulating views and transformations, I would still suggest using the approach described above to make the view draw the image as you like, then converting the UIView content into a UIImage object:
UIGraphicsBeginImageContext(rect.size);
[imageView.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage* viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();