How can I access hyperlinks in PDF documents on the iPhone and iPad?
I want to set links on the content of an e-book.
Use the following function to put UIButtons over the links in a PDF. This will draw buttons on your UIView. You have to pass your page as a parameter. Currently it draws buttons with type UIButtonTypeRoundedRect; afterwards you can change it to UIButtonTypeCustom so the buttons are invisible. Initially the buttons may be drawn somewhere other than the links, so you may need to adjust the x/y position according to your requirements.
// Overlays a UIButton on every URI link annotation of the given PDF page so
// taps can be routed to -urlOpen:. Buttons are added to self.superview.
// Pass the CGPDFPageRef of the page currently being displayed.
- (void)drawLinks:(CGPDFPageRef)thisPage
{
    CGPDFDictionaryRef pageDictionary = CGPDFPageGetDictionary(thisPage);
    CGPDFArrayRef annots;
    if (!CGPDFDictionaryGetArray(pageDictionary, "Annots", &annots)) {
        return; // page has no annotations at all
    }
    size_t annotCount = CGPDFArrayGetCount(annots);

    // Page geometry is identical for every annotation — compute it once
    // instead of once per loop iteration as the original did.
    CGPDFInteger pageRotate = 0;
    CGPDFDictionaryGetInteger(pageDictionary, "Rotate", &pageRotate);
    CGRect pageRect = CGRectIntegral(CGPDFPageGetBoxRect(thisPage, kCGPDFMediaBox));
    if (pageRotate == 90 || pageRotate == 270) {
        CGFloat temp = pageRect.size.width;
        pageRect.size.width = pageRect.size.height;
        pageRect.size.height = temp;
    }
    // PDF user space is bottom-up; flip into UIKit's top-down coordinates.
    CGAffineTransform flip = CGAffineTransformIdentity;
    flip = CGAffineTransformTranslate(flip, 0, pageRect.size.height);
    flip = CGAffineTransformScale(flip, 1.0, -1.0);

    for (size_t j = 0; j < annotCount; ++j) {
        // One malformed annotation must not abort the remaining ones, so
        // every failure below skips to the next entry. (The original code
        // returned, silently dropping every link after the first bad one.)
        CGPDFObjectRef annotObj;
        if (!CGPDFArrayGetObject(annots, j, &annotObj)) {
            continue;
        }
        CGPDFDictionaryRef annotDict;
        if (!CGPDFObjectGetValue(annotObj, kCGPDFObjectTypeDictionary, &annotDict)) {
            continue;
        }
        CGPDFDictionaryRef actionDict;
        if (!CGPDFDictionaryGetDictionary(annotDict, "A", &actionDict)) {
            continue; // not an action annotation
        }
        CGPDFStringRef uriStringRef;
        if (!CGPDFDictionaryGetString(actionDict, "URI", &uriStringRef)) {
            continue; // action is not a URI link
        }
        CGPDFArrayRef rectArray;
        if (!CGPDFDictionaryGetArray(annotDict, "Rect", &rectArray)) {
            continue;
        }

        // /Rect is [llx lly urx ury]; entries may be stored as either real
        // or integer PDF objects, so try both (the original only handled
        // reals, failing on PDFs that store integer coordinates).
        if (CGPDFArrayGetCount(rectArray) < 4) {
            continue;
        }
        CGPDFReal coords[4] = {0, 0, 0, 0};
        BOOL rectOK = YES;
        for (size_t k = 0; k < 4; ++k) {
            CGPDFObjectRef rectObj;
            if (!CGPDFArrayGetObject(rectArray, k, &rectObj)) {
                rectOK = NO;
                break;
            }
            CGPDFReal realCoord;
            CGPDFInteger intCoord;
            if (CGPDFObjectGetValue(rectObj, kCGPDFObjectTypeReal, &realCoord)) {
                coords[k] = realCoord;
            } else if (CGPDFObjectGetValue(rectObj, kCGPDFObjectTypeInteger, &intCoord)) {
                coords[k] = (CGPDFReal)intCoord;
            } else {
                rectOK = NO;
                break;
            }
        }
        if (!rectOK) {
            continue;
        }

        // NOTE(review): CGPDFStringGetBytePtr is not guaranteed to be
        // NUL-terminated; building with CGPDFStringGetLength would be safer.
        char *uriString = (char *)CGPDFStringGetBytePtr(uriStringRef);
        NSString *uri = [NSString stringWithCString:uriString encoding:NSUTF8StringEncoding];

        // Convert [llx lly urx ury] to an origin/size rect, then flip.
        CGRect rect = CGRectMake(coords[0], coords[1],
                                 coords[2] - coords[0], coords[3] - coords[1]);
        rect = CGRectApplyAffineTransform(rect, flip);

        UIButton *btnTmp = [UIButton buttonWithType:UIButtonTypeRoundedRect]; // After testing put here UIButtonTypeCustom
        [btnTmp addTarget:self action:@selector(urlOpen:) forControlEvents:UIControlEventTouchUpInside];
        rect.origin.x += 78;  // empirical offset — adjust per your layout
        rect.origin.y += 108; // empirical offset — adjust per your layout
        btnTmp.frame = rect;
        // -setTitle:forState: is the supported way to label a UIButton;
        // assigning titleLabel.text directly does not persist.
        [btnTmp setTitle:uri forState:UIControlStateNormal];
        [self.superview addSubview:btnTmp];
    }
}
Related
I am working on Document edge detection using OpenCV in my iOS Project and successfully detected the edges of document.
Now, I want to rotate the image along with detected rectangle. I have referred this
Github project to detect the edges.
For that, I first rotated the image and tried to re-detect the edges by again finding the largest rectangle in the image. But unfortunately, it is not giving me the exact rectangle.
Can somebody suggest a way to detect the rotated document's edges again, or should I rotate the detected rectangle along with the image?
Before Rotation Image
After Rotation Image
// Renders `image` into an RGBA cv::Mat, resizes it to `size`, finds the
// largest quadrilateral contour, and returns its four corners sorted
// clockwise around their centroid as NSValue-wrapped CGPoints.
// Returns nil if no 4-cornered candidate was found.
// (This restores the @[ / @( / @"…" literals that were mangled to #[ / #( / #"
// in the original paste.)
+(NSMutableArray *) getLargestSquarePoints: (UIImage *) image : (CGSize) size {
    // --- UIImage -> cv::Mat -------------------------------------------------
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); // Get rule: not released here
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;
    cv::Mat imageMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(imageMat.data, cols, rows, 8,
                                                    imageMat.step[0], colorSpace,
                                                    kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);
    cv::resize(imageMat, imageMat, cvSize(size.width, size.height));

    // --- detect candidate rectangles ---------------------------------------
    std::vector<std::vector<cv::Point> > rectangles;
    std::vector<cv::Point> largestRectangle;
    getRectangles(imageMat, rectangles);
    getlargestRectangle(rectangles, largestRectangle);
    if (largestRectangle.size() != 4) {
        imageMat.release();
        return nil;
    }

    // --- sort the 4 corners clockwise around their centroid ----------------
    // Thanks to: https://stackoverflow.com/questions/20395547/sorting-an-array-of-x-and-y-vertice-points-ios-objective-c/20399468#20399468
    NSArray *points = @[
        [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[0].x, (CGFloat)largestRectangle[0].y}],
        [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[1].x, (CGFloat)largestRectangle[1].y}],
        [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[2].x, (CGFloat)largestRectangle[2].y}],
        [NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[3].x, (CGFloat)largestRectangle[3].y}] ];

    CGPoint min = [points[0] CGPointValue];
    CGPoint max = min;
    for (NSValue *value in points) {
        CGPoint point = [value CGPointValue];
        min.x = fminf(point.x, min.x);
        min.y = fminf(point.y, min.y);
        max.x = fmaxf(point.x, max.x);
        max.y = fmaxf(point.y, max.y);
    }
    CGPoint center = {
        0.5f * (min.x + max.x),
        0.5f * (min.y + max.y),
    };
    NSLog(@"center: %@", NSStringFromCGPoint(center));

    // Angle of each corner about the centroid, offset so ordering starts
    // at the top-left quadrant.
    NSNumber *(^angleFromPoint)(id) = ^(NSValue *value){
        CGPoint point = [value CGPointValue];
        CGFloat theta = atan2f(point.y - center.y, point.x - center.x);
        CGFloat angle = fmodf(M_PI - M_PI_4 + theta, 2 * M_PI);
        return @(angle);
    };
    NSArray *sortedPoints = [points sortedArrayUsingComparator:^NSComparisonResult(id a, id b) {
        return [angleFromPoint(a) compare:angleFromPoint(b)];
    }];
    NSLog(@"sorted points: %@", sortedPoints);

    imageMat.release();
    return [NSMutableArray arrayWithArray:sortedPoints];
}
// Finds convex quadrilateral contours ("rectangles") in `image` and appends
// them to `rectangles`. Works per color plane, combining Canny edges with a
// coarse intensity threshold, and keeps only 4-gons whose corner angles are
// close to 90 degrees.
void getRectangles(cv::Mat& image, std::vector<std::vector<cv::Point>>&rectangles) {
// blur will enhance edge detection
// NOTE(review): cv::Mat(image) shares the pixel buffer with `image`, so this
// GaussianBlur effectively blurs `image` in place — confirm that is intended.
cv::Mat blurred(image);
GaussianBlur(image, blurred, cvSize(11,11), 0);
cv::Mat gray0(blurred.size(), CV_8U), gray;
std::vector<std::vector<cv::Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
// Copy plane `c` of the blurred image into the single-channel gray0.
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Canny(gray0, gray, 0, 50, 5);
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
}
else
{
// Simple binary threshold at (l+1)/threshold_level of full scale.
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
std::vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
// Keep only convex quadrilaterals of non-trivial size.
if (approx.size() == 4 &&
fabs(contourArea(cv::Mat(approx))) > 1000 &&
isContourConvex(cv::Mat(approx)))
{
// Reject quads with any corner far from 90° (|cos| >= 0.3).
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
rectangles.push_back(approx);
}
}
}
}
}
void getlargestRectangle(const std::vector<std::vector<cv::Point> >&rectangles, std::vector<cv::Point>& largestRectangle)
{
if (!rectangles.size())
{
return;
}
double maxArea = 0;
int index = 0;
for (size_t i = 0; i < rectangles.size(); i++)
{
cv::Rect rectangle = boundingRect(cv::Mat(rectangles[i]));
double area = rectangle.width * rectangle.height;
if (maxArea < area)
{
maxArea = area;
index = i;
}
}
largestRectangle = rectangles[index];
}
// Returns the cosine of the angle at vertex pt0 formed by the rays toward
// pt1 and pt2 (dot product over the product of lengths).
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
    double ux = pt1.x - pt0.x;
    double uy = pt1.y - pt0.y;
    double vx = pt2.x - pt0.x;
    double vy = pt2.y - pt0.y;
    double dot = ux*vx + uy*vy;
    // The small epsilon keeps the denominator nonzero for degenerate points.
    double lengths = sqrt((ux*ux + uy*uy)*(vx*vx + vy*vy) + 1e-10);
    return dot / lengths;
}
// Warps `origImage` (rendered at `size`) so that the four `corners` map onto
// the full newWidth x newHeight output rectangle, and returns the result as
// a UIImage. `corners` must be ordered to match dst: top-left, top-right,
// bottom-right, bottom-left.
+(UIImage *) getTransformedImage: (CGFloat) newWidth : (CGFloat) newHeight : (UIImage *) origImage : (CGPoint [4]) corners : (CGSize) size {
    // --- UIImage -> cv::Mat -------------------------------------------------
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(origImage.CGImage); // Get rule: not released
    CGFloat cols = size.width;
    CGFloat rows = size.height;
    cv::Mat imageMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
    CGContextRef contextRef = CGBitmapContextCreate(imageMat.data,       // backing data
                                                    cols,                // width
                                                    rows,                // height
                                                    8,                   // bits per component
                                                    imageMat.step[0],    // bytes per row
                                                    colorSpace,
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), origImage.CGImage);
    CGContextRelease(contextRef);

    // --- perspective warp ---------------------------------------------------
    cv::Mat newImageMat = cv::Mat( cvSize(newWidth,newHeight), CV_8UC4);
    cv::Point2f src[4], dst[4];
    for (int i = 0; i < 4; i++) {
        src[i].x = corners[i].x;
        src[i].y = corners[i].y;
    }
    // Destination is the full output rect. (The original also assigned an
    // experimental -10/+1 set of values that was immediately overwritten —
    // that dead code is removed here.)
    dst[0].x = 0;
    dst[0].y = 0;
    dst[1].x = newWidth - 1;
    dst[1].y = 0;
    dst[2].x = newWidth - 1;
    dst[2].y = newHeight - 1;
    dst[3].x = 0;
    dst[3].y = newHeight - 1;
    cv::warpPerspective(imageMat, newImageMat, cv::getPerspectiveTransform(src, dst), cvSize(newWidth, newHeight));

    // --- cv::Mat -> UIImage -------------------------------------------------
    NSData *data = [NSData dataWithBytes:newImageMat.data length:newImageMat.elemSize() * newImageMat.total()];
    CGColorSpaceRef colorSpace2;
    CGBitmapInfo bitmapInfo;
    if (newImageMat.elemSize() == 1) {
        colorSpace2 = CGColorSpaceCreateDeviceGray();
        bitmapInfo = kCGImageAlphaNone | kCGBitmapByteOrderDefault;
    } else {
        // 4-channel output must use an RGB color space; the original used
        // DeviceGray here, which makes CGImageCreate fail for 32bpp data.
        colorSpace2 = CGColorSpaceCreateDeviceRGB();
        bitmapInfo = kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault;
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGFloat width = newImageMat.cols;
    CGFloat height = newImageMat.rows;
    CGImageRef imageRef = CGImageCreate(width, height, 8, 8 * newImageMat.elemSize(),
                                        newImageMat.step[0],
                                        colorSpace2,
                                        bitmapInfo, provider,
                                        NULL, false, kCGRenderingIntentDefault);
    UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace2);
    return image;
}
If you use cv2.minAreaRect, it gives the best enclosing rectangle for a contour plus its rotation angle in degrees, so you can rotate back.
I have found myself in a situation where I have several NSImage objects that I need to rotate by 90 degrees, change the colour of pixels that are one colour to another colour and then get the RGB565 data representation for it as an NSData object.
I found the vImageConvert_ARGB8888toRGB565 function in the Accelerate framework so this should be able to do the RGB565 output.
There are a few UIImage rotation I have found here on StackOverflow, but I'm having trouble converting them to NSImage as it appears I have to use NSGraphicsContext not CGContextRef?
Ideally I would like these in an NSImage Category so I can just call.
NSImage *rotated = [inputImage rotateByDegrees:90];
NSImage *colored = [rotated changeColorFrom:[NSColor redColor] toColor:[NSColor blackColor]];
NSData *rgb565 = [colored rgb565Data];
I just don't know where to start as image manipulation is new to me.
I appreciate any help I can get.
Edit (22/04/2013)
I have managed to piece this code together to generate the RGB565 data, it generates it upside down and with some small artefacts, I assume the first is due to different coordinate systems being used and the second possibly due to me going from PNG to BMP. I will do some more testing using a BMP to start and also a non-tranparent PNG.
// Renders self into an ARGB8888 bitmap, then converts it to RGB565
// (16 bits / 2 bytes per pixel) with Accelerate's vImage.
// Returns nil if the bitmap context could not be created.
- (NSData *)RGB565Data
{
    CGContextRef cgctx = CreateARGBBitmapContext(self.CGImage);
    if (cgctx == NULL)
        return nil;
    size_t w = CGImageGetWidth(self.CGImage);
    size_t h = CGImageGetHeight(self.CGImage);
    CGRect rect = {{0,0},{w,h}};
    CGContextDrawImage(cgctx, rect, self.CGImage);
    void *data = CGBitmapContextGetData (cgctx);
    CGContextRelease(cgctx);
    if (!data)
        return nil;

    vImage_Buffer src;
    src.data = data;
    src.width = w;
    src.height = h;
    src.rowBytes = (w * 4);
    void* destData = malloc((w * 2) * h);
    vImage_Buffer dst;
    dst.data = destData;
    dst.width = w;
    dst.height = h;
    dst.rowBytes = (w * 2);
    vImageConvert_ARGB8888toRGB565(&src, &dst, 0);
    // The ARGB buffer was malloc'd inside CreateARGBBitmapContext and is NOT
    // freed by CGContextRelease — free it here (the original leaked it).
    free(data);

    size_t dataSize = 2 * w * h; // RGB565 = 2 5-bit components and 1 6-bit (16 bits/2 bytes)
    NSData *RGB565Data = [NSData dataWithBytes:dst.data length:dataSize];
    free(destData);
    return RGB565Data;
}
// Bridges this NSImage to a CGImage, rasterizing with hints from the current
// graphics context. The returned CGImageRef follows the Get rule (caller does
// not own it).
- (CGImageRef)CGImage
{
return [self CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil];
}
// Creates an ARGB8888 bitmap context sized to match `inImage`.
// NOTE: the backing buffer is malloc'd here and is NOT freed by
// CGContextRelease — the caller must free(CGBitmapContextGetData(ctx)).
// Returns NULL on failure.
CGContextRef CreateARGBBitmapContext (CGImageRef inImage)
{
    CGContextRef context = NULL;
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    // size_t (not int) so large images don't overflow the byte counts.
    size_t bitmapBytesPerRow = pixelsWide * 4;
    size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
        return NULL; // NULL, not nil: CF pointer in a plain C function
    void *bitmapData = malloc( bitmapByteCount );
    if (bitmapData == NULL)
    {
        CGColorSpaceRelease( colorSpace );
        return NULL;
    }
    context = CGBitmapContextCreate (bitmapData,
                                     pixelsWide,
                                     pixelsHigh,
                                     8, // bits per component
                                     bitmapBytesPerRow,
                                     colorSpace,
                                     kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        free (bitmapData); // don't leak the buffer when creation fails
        fprintf (stderr, "Context not created!");
    }
    CGColorSpaceRelease( colorSpace );
    return context;
}
For most of this, you'll want to use Core Image.
Rotation you can do with the CIAffineTransform filter. This takes an NSAffineTransform object. You may have already worked with that class before. (You could do the rotation with NSImage itself, but it's easier with Core Image and you'll probably need to use it for the next step anyway.)
I don't know what you mean by “change the colour of pixels that are one colour to another colour”; that could mean any of a lot of different things. Chances are, though, there's a filter for that.
I also don't know why you need 565 data specifically, but assuming you have a real need for that, you're correct that that function will be involved. Use CIContext's lowest-level rendering method to get 8-bit-per-component ARGB output, and then use that vImage function to convert it to 565 RGB.
I have managed to get what I want by using NSBitmapImageRep (accessing it with a bit of a hack). If anyone knows a better way of doing this, please do share.
The - (NSBitmapImageRep)bitmap method is my hack. The NSImage starts off having only an NSBitmapImageRep; however, after the rotation method a CIImageRep is added, which takes priority over the NSBitmapImageRep and breaks the colour code (as NSImage renders the CIImageRep, which doesn't get colored).
BitmapImage.m (Subclass of NSImage)
// Creates an ARGB8888 bitmap context sized to match `inImage`.
// NOTE: the backing buffer is malloc'd here and is NOT freed by
// CGContextRelease — the caller must free(CGBitmapContextGetData(ctx)).
// Returns NULL on failure.
CGContextRef CreateARGBBitmapContext (CGImageRef inImage)
{
    CGContextRef context = NULL;
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    // size_t (not int) so large images don't overflow the byte counts.
    size_t bitmapBytesPerRow = pixelsWide * 4;
    size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
        return NULL; // NULL, not nil: CF pointer in a plain C function
    void *bitmapData = malloc( bitmapByteCount );
    if (bitmapData == NULL)
    {
        CGColorSpaceRelease( colorSpace );
        return NULL;
    }
    context = CGBitmapContextCreate (bitmapData,
                                     pixelsWide,
                                     pixelsHigh,
                                     8, // bits per component
                                     bitmapBytesPerRow,
                                     colorSpace,
                                     kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        free (bitmapData); // don't leak the buffer when creation fails
        fprintf (stderr, "Context not created!");
    }
    CGColorSpaceRelease( colorSpace );
    return context;
}
// Renders self into an ARGB8888 bitmap, then converts it to RGB565
// (16 bits / 2 bytes per pixel) with Accelerate's vImage.
// Returns nil if the bitmap context could not be created.
- (NSData *)RGB565Data
{
    CGContextRef cgctx = CreateARGBBitmapContext(self.CGImage);
    if (cgctx == NULL)
        return nil;
    size_t w = CGImageGetWidth(self.CGImage);
    size_t h = CGImageGetHeight(self.CGImage);
    CGRect rect = {{0,0},{w,h}};
    CGContextDrawImage(cgctx, rect, self.CGImage);
    void *data = CGBitmapContextGetData (cgctx);
    CGContextRelease(cgctx);
    if (!data)
        return nil;

    vImage_Buffer src;
    src.data = data;
    src.width = w;
    src.height = h;
    src.rowBytes = (w * 4);
    void* destData = malloc((w * 2) * h);
    vImage_Buffer dst;
    dst.data = destData;
    dst.width = w;
    dst.height = h;
    dst.rowBytes = (w * 2);
    vImageConvert_ARGB8888toRGB565(&src, &dst, 0);
    // The ARGB buffer was malloc'd inside CreateARGBBitmapContext and is NOT
    // freed by CGContextRelease — free it here (the original leaked it).
    free(data);

    size_t dataSize = 2 * w * h; // RGB565 = 2 5-bit components and 1 6-bit (16 bits/2 bytes)
    NSData *RGB565Data = [NSData dataWithBytes:dst.data length:dataSize];
    free(destData);
    return RGB565Data;
}
// Returns the image's NSBitmapImageRep, removing any other representations
// (e.g. a CIImageRep added by rotation) so that pixel edits made through the
// bitmap rep are what NSImage actually renders. Creates and attaches a
// bitmap rep if none exists. MRC code: returns an autoreleased rep.
- (NSBitmapImageRep*)bitmap
{
NSBitmapImageRep *bitmap = nil;
NSMutableArray *repsToRemove = [NSMutableArray array];
// Iterate through the representations that back the NSImage
for (NSImageRep *rep in self.representations)
{
// If the representation is a bitmap
if ([rep isKindOfClass:[NSBitmapImageRep class]])
{
// Retain before mutating self.representations below (MRC).
bitmap = [(NSBitmapImageRep*)rep retain];
break;
}
else
{
// Anything that is not a bitmap rep gets removed afterwards, so it
// cannot shadow the bitmap rep during rendering.
[repsToRemove addObject:rep];
}
}
// If no bitmap representation was found, we create one (this shouldn't occur)
if (bitmap == nil)
{
bitmap = [[[NSBitmapImageRep alloc] initWithCGImage:self.CGImage] retain];
[self addRepresentation:bitmap];
}
// Removal is deferred until after iteration: mutating a collection while
// fast-enumerating it is not allowed.
for (NSImageRep *rep2 in repsToRemove)
{
[self removeRepresentation:rep2];
}
return [bitmap autorelease];
}
// Reads the color of pixel (x, y) from the backing bitmap representation.
- (NSColor*)colorAtX:(NSInteger)x y:(NSInteger)y
{
return [self.bitmap colorAtX:x y:y];
}
// Writes `color` into pixel (x, y) of the backing bitmap representation.
- (void)setColor:(NSColor*)color atX:(NSInteger)x y:(NSInteger)y
{
[self.bitmap setColor:color atX:x y:y];
}
NSImage+Extra.m (NSImage Category)
// NSImage category helper: bridges to a CGImage using the current graphics
// context for rasterization hints (Get rule — caller does not own the result).
- (CGImageRef)CGImage
{
return [self CGImageForProposedRect:NULL context:[NSGraphicsContext currentContext] hints:nil];
}
Usage
// Presents an open panel and loads the selected file(s) into the image view.
// (Each iteration replaces imageView.image, so effectively the last selected
// file wins.) MRC code.
- (IBAction)load:(id)sender
{
    NSOpenPanel* openDlg = [NSOpenPanel openPanel];
    [openDlg setCanChooseFiles:YES];
    [openDlg setCanChooseDirectories:YES];
    if ( [openDlg runModalForDirectory:nil file:nil] == NSOKButton )
    {
        NSArray* files = [openDlg filenames];
        for( int i = 0; i < [files count]; i++ )
        {
            NSString* fileName = [files objectAtIndex:i];
            BitmapImage *image = [[BitmapImage alloc] initWithContentsOfFile:fileName];
            imageView.image = image;
            // imageView retains the image; release our ownership to fix
            // the original MRC leak.
            [image release];
        }
    }
}
// Recolors every pixel whose color matches the pixel at (1,1) to red,
// then asks the image view to redraw.
- (IBAction)colorize:(id)sender
{
    NSInteger width = (NSInteger)imageView.image.size.width;
    NSInteger height = (NSInteger)imageView.image.size.height;
    BitmapImage *img = (BitmapImage*)imageView.image;
    NSColor *targetColor = [img colorAtX:1 y:1];
    // Use < (not <=): valid pixel coordinates are 0..width-1 / 0..height-1;
    // the original read one row and one column past the edge.
    for (NSInteger x = 0; x < width; x++)
    {
        for (NSInteger y = 0; y < height; y++)
        {
            // -isEqual: compares color values; the original's == only
            // compared object pointers and essentially never matched.
            if ([[img colorAtX:x y:y] isEqual:targetColor])
            {
                [img setColor:[NSColor redColor] atX:x y:y];
            }
        }
    }
    [imageView setNeedsDisplay:YES];
}
// Rotates the displayed image 90° counter-clockwise (NO = not clockwise)
// and swaps it into the image view.
- (IBAction)rotate:(id)sender
{
BitmapImage *img = (BitmapImage*)imageView.image;
BitmapImage *newImg = [img rotate90DegreesClockwise:NO];
imageView.image = newImg;
}
Edit (24/04/2013)
I have changed the following code:
// Reads pixel (x, y) from the backing bitmap as a plain RGBColor struct.
// Assumes the bitmap's first three components are R, G, B in that order —
// see the hasAlpha-aware revision later in this thread.
- (RGBColor)colorAtX:(NSInteger)x y:(NSInteger)y
{
NSUInteger components[4];
[self.bitmap getPixel:components atX:x y:y];
//NSLog(@"R: %ld, G:%ld, B:%ld", components[0], components[1], components[2]);
RGBColor color = {components[0], components[1], components[2]};
return color;
}
// Component-wise equality check for two RGBColor values.
- (BOOL)color:(RGBColor)a isEqualToColor:(RGBColor)b
{
    BOOL sameRed   = (a.red == b.red);
    BOOL sameGreen = (a.green == b.green);
    BOOL sameBlue  = (a.blue == b.blue);
    return sameRed && sameGreen && sameBlue;
}
// Writes `color` into pixel (x, y) as R, G, B plus a fully opaque fourth
// component (255).
- (void)setColor:(RGBColor)color atX:(NSUInteger)x y:(NSUInteger)y
{
    NSUInteger pixel[4];
    pixel[0] = (NSUInteger)color.red;
    pixel[1] = (NSUInteger)color.green;
    pixel[2] = (NSUInteger)color.blue;
    pixel[3] = 255; // opaque
    //NSLog(@"R: %ld, G: %ld, B: %ld", pixel[0], pixel[1], pixel[2]);
    [self.bitmap setPixel:pixel atX:x y:y];
}
// Replaces every pixel matching the color at (0,0) with opaque red,
// comparing RGBColor structs component-wise, then redraws the image view.
- (IBAction)colorize:(id)sender
{
    NSInteger width = (NSInteger)imageView.image.size.width;
    NSInteger height = (NSInteger)imageView.image.size.height;
    BitmapImage *img = (BitmapImage*)imageView.image;
    RGBColor oldColor = [img colorAtX:0 y:0];
    RGBColor newColor;
    newColor.red = 255;
    newColor.green = 0;
    newColor.blue = 0;
    // Use < (not <=): valid pixel coordinates are 0..width-1 / 0..height-1;
    // the original read/wrote one row and one column past the edge.
    for (NSInteger x = 0; x < width; x++)
    {
        for (NSInteger y = 0; y < height; y++)
        {
            if ([img color:[img colorAtX:x y:y] isEqualToColor:oldColor])
            {
                [img setColor:newColor atX:x y:y];
            }
        }
    }
    [imageView setNeedsDisplay:YES];
}
But now it changes the pixels to red the first time and then blue the second time the colorize method is called.
Edit 2 (24/04/2013)
The following code fixes it. It was because the rotation code was adding an alpha channel to the NSBitmapImageRep.
// Reads pixel (x, y) as an RGBColor, accounting for whether the bitmap rep
// carries an alpha plane (the rotation code adds one).
// NOTE(review): the hasAlpha branch reads components[1..3] as R,G,B, which
// implies the alpha component comes first in this rep's layout — confirm
// against the rep's bitmapFormat before reusing elsewhere.
- (RGBColor)colorAtX:(NSInteger)x y:(NSInteger)y
{
if (self.bitmap.hasAlpha)
{
NSUInteger components[4];
[self.bitmap getPixel:components atX:x y:y];
RGBColor color = {components[1], components[2], components[3]};
return color;
}
else
{
NSUInteger components[3];
[self.bitmap getPixel:components atX:x y:y];
RGBColor color = {components[0], components[1], components[2]};
return color;
}
}
// Writes `color` into pixel (x, y), matching the component layout used by
// -colorAtX:y:: when the rep has alpha the first component is the (opaque)
// alpha value, followed by R, G, B; otherwise plain R, G, B.
- (void)setColor:(RGBColor)color atX:(NSUInteger)x y:(NSUInteger)y
{
    if (self.bitmap.hasAlpha)
    {
        NSUInteger pixel[4];
        pixel[0] = 255; // opaque
        pixel[1] = (NSUInteger)color.red;
        pixel[2] = (NSUInteger)color.green;
        pixel[3] = (NSUInteger)color.blue;
        [self.bitmap setPixel:pixel atX:x y:y];
    }
    else
    {
        NSUInteger pixel[3];
        pixel[0] = color.red;
        pixel[1] = color.green;
        pixel[2] = color.blue;
        [self.bitmap setPixel:pixel atX:x y:y];
    }
}
Ok, I decided to spend the day researching Peter's suggestion of using CoreImage.
I had done some research previously and decided it was too hard but after an entire day of research I finally worked out what I needed to do and amazingly it couldn't be easier.
Early on I had decided that the Apple ChromaKey Core Image example would be a great starting point but the example code frightened me off due to the 3-dimensional colour cube. After watching the WWDC 2012 video on Core Image and finding some sample code on github (https://github.com/vhbit/ColorCubeSample) I decided to jump in and just give it a go.
Here are the important parts of the working code, I haven't included the RGB565Data method as I haven't written it yet, but it should be easy using the method Peter suggested:
CIImage+Extras.h
- (NSImage*) NSImage;
- (CIImage*) imageRotated90DegreesClockwise:(BOOL)clockwise;
- (CIImage*) imageWithChromaColor:(NSColor*)chromaColor BackgroundColor:(NSColor*)backColor;
- (NSColor*) colorAtX:(NSUInteger)x y:(NSUInteger)y;
CIImage+Extras.m
// Renders this CIImage through a CIContext bound to the current graphics
// context and wraps the result in an autoreleased NSImage (MRC code).
- (NSImage*) NSImage
{
    CGContextRef cg = [[NSGraphicsContext currentContext] graphicsPort];
    CIContext *context = [CIContext contextWithCGContext:cg options:nil];
    CGImageRef cgImage = [context createCGImage:self fromRect:self.extent];
    NSImage *image = [[NSImage alloc] initWithCGImage:cgImage size:NSZeroSize];
    // -createCGImage: follows the Create rule; releasing it here fixes the
    // CGImage leak in the original.
    CGImageRelease(cgImage);
    return [image autorelease];
}
// Rotates the image ±90° with CIAffineTransform, then applies a second
// transform translating the rotated extent's origin back to (0,0).
// (Restores the @"…" string literals that were mangled to #"…" in the paste.)
- (CIImage*) imageRotated90DegreesClockwise:(BOOL)clockwise
{
    CIImage *im = self;
    CIFilter *f = [CIFilter filterWithName:@"CIAffineTransform"];
    NSAffineTransform *t = [NSAffineTransform transform];
    [t rotateByDegrees:clockwise ? -90 : 90];
    [f setValue:t forKey:@"inputTransform"];
    [f setValue:im forKey:@"inputImage"];
    im = [f valueForKey:@"outputImage"];

    // The rotation moves the extent off the origin; translate it back.
    CGRect extent = [im extent];
    f = [CIFilter filterWithName:@"CIAffineTransform"];
    t = [NSAffineTransform transform];
    [t translateXBy:-extent.origin.x
                yBy:-extent.origin.y];
    [f setValue:t forKey:@"inputTransform"];
    [f setValue:im forKey:@"inputImage"];
    im = [f valueForKey:@"outputImage"];
    return im;
}
// Keys out `chromaColor` from the image (via a CIColorCube whose entries
// matching the chroma color get alpha 0) and composites the result over a
// solid `backColor` background. MRC code.
// (Restores the @"…" literals mangled to #"…" in the paste.)
- (CIImage*) imageWithChromaColor:(NSColor*)chromaColor BackgroundColor:(NSColor*)backColor
{
    CIImage *im = self;
    CIImage *backImage = nil;
    CIColor *backCIColor = [[CIColor alloc] initWithColor:backColor];
    backImage = [CIImage imageWithColor:backCIColor];
    backImage = [backImage imageByCroppingToRect:self.extent];
    [backCIColor release];

    float chroma[3];
    chroma[0] = chromaColor.redComponent;
    chroma[1] = chromaColor.greenComponent;
    chroma[2] = chromaColor.blueComponent;

    // Allocate the 64x64x64 RGBA float lookup cube.
    const unsigned int size = 64;
    const unsigned int cubeDataSize = size * size * size * sizeof (float) * 4;
    float *cubeData = (float *)malloc (cubeDataSize);
    float rgb[3];
    // Populate cube with a simple gradient going from 0 to 1; entries whose
    // color exactly equals the chroma color become fully transparent.
    // NOTE(review): exact float equality only matches chroma colors that fall
    // precisely on the cube's 1/(size-1) grid — consider a tolerance.
    size_t offset = 0;
    for (int z = 0; z < size; z++){
        rgb[2] = ((double)z)/(size-1); // Blue value
        for (int y = 0; y < size; y++){
            rgb[1] = ((double)y)/(size-1); // Green value
            for (int x = 0; x < size; x ++){
                rgb[0] = ((double)x)/(size-1); // Red value
                float alpha = ((rgb[0] == chroma[0]) && (rgb[1] == chroma[1]) && (rgb[2] == chroma[2])) ? 0.0 : 1.0;
                // CIColorCube expects premultiplied components.
                cubeData[offset]   = rgb[0] * alpha;
                cubeData[offset+1] = rgb[1] * alpha;
                cubeData[offset+2] = rgb[2] * alpha;
                cubeData[offset+3] = alpha;
                offset += 4;
            }
        }
    }
    // Hand the cube buffer to NSData, which frees it when done.
    NSData *data = [NSData dataWithBytesNoCopy:cubeData
                                        length:cubeDataSize
                                  freeWhenDone:YES];
    CIFilter *colorCube = [CIFilter filterWithName:@"CIColorCube"];
    [colorCube setValue:[NSNumber numberWithInt:size] forKey:@"inputCubeDimension"];
    [colorCube setValue:data forKey:@"inputCubeData"];
    [colorCube setValue:im forKey:@"inputImage"];
    im = [colorCube valueForKey:@"outputImage"];

    // Composite the keyed image over the solid background.
    CIFilter *sourceOver = [CIFilter filterWithName:@"CISourceOverCompositing"];
    [sourceOver setValue:im forKey:@"inputImage"];
    [sourceOver setValue:backImage forKey:@"inputBackgroundImage"];
    im = [sourceOver valueForKey:@"outputImage"];
    return im;
}
// Reads the color at (x, y) by rendering this CIImage into a throwaway
// NSBitmapImageRep. NOTE(review): this renders the whole image per call —
// very slow if used per-pixel in a loop. MRC code.
- (NSColor*)colorAtX:(NSUInteger)x y:(NSUInteger)y
{
NSBitmapImageRep* bitmap = [[NSBitmapImageRep alloc] initWithCIImage:self];
NSColor *color = [bitmap colorAtX:x y:y];
[bitmap release];
return color;
}
I am new to objective c and trying the learn the basics of creating a UI. In my UIView class, I have created a grid along with buttons, yet those buttons don't actually exist (as far as I can tell). Ideally, when I click a button, the image is supposed to change, but that doesn't happen. Where should I be looking at to fix?
// Designated initializer: call super ONCE, keep its result in self, set up
// ivars on that object, and return it. The original called [super init],
// set the ivars, then returned a brand-new [super initWithFrame:] object —
// discarding the tiles_ array it had just created.
- (id)initWithFrame:(CGRect)frame {
    self = [super initWithFrame:frame];
    if (self) {
        // NOTE(review): under MRC this autoreleased array is not retained;
        // use [[NSMutableArray alloc] init] (or a retain) in non-ARC code.
        tiles_ = [NSMutableArray array];
        tileClosed = NO;
    }
    return self;
}
// Lays MINE_COUNT mines on randomly chosen tiles, never on `sender` (the
// first tile the player clicked) and never twice on the same tile.
- (void) initTile : (Tile *) sender
{
    int MINE_COUNT = 16;
    NSUInteger tileCount = [tiles_ count];
    // Guard: with no tiles, `random() % 0` would crash; with fewer than
    // MINE_COUNT + 1 eligible tiles the retry loop below can never finish.
    if (tileCount <= (NSUInteger)MINE_COUNT) {
        tileClosed = YES;
        return;
    }
    for (int i = 0; i < MINE_COUNT; i++) {
        while (1) {
            // Retry until we land on a tile that is neither the clicked
            // tile nor already mined.
            int rand = random() % tileCount;
            Tile *tile = [tiles_ objectAtIndex:rand];
            if (tile != sender && !tile.isMine) {
                tile.isMine = YES;
                break;
            }
        }
    }
    tileClosed = YES;
}
// Draws a 16x16 grid and (once) creates the Tile buttons over the cells.
// Fixes to the original: @selector was mangled to #selector; tiles were
// re-created on every redraw and never added as subviews, so they never
// appeared on screen or received touches; the frames now use the computed
// cell size and cover all 16 rows/columns (the original started at 1).
- (void)drawRect:(CGRect)rect
{
    NSLog( @"drawRect:" );
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGRect bounds = [self bounds];            // view's location and size
    CGFloat w = CGRectGetWidth( bounds );     // view width (points)
    CGFloat h = CGRectGetHeight ( bounds );   // view height (points)
    dw = w/16.0f;                             // cell width (points)
    dh = h/16.0f;                             // cell height (points)
    NSLog( @"view (width,height) = (%g,%g)", w, h );
    NSLog( @"cell (width,height) = (%g,%g)", dw, dh );

    // Draw the interior grid lines of the 16x16 board.
    CGContextBeginPath( context );
    for ( int i = 1; i < 16; ++i )
    {
        // horizontal grid line
        CGContextMoveToPoint( context, 0, i*dh );
        CGContextAddLineToPoint( context, w, i*dh );
        // vertical grid line
        CGContextMoveToPoint( context, i*dw, 0 );
        CGContextAddLineToPoint( context, i*dw, h );
    }
    [[UIColor grayColor] setStroke];
    CGContextDrawPath( context, kCGPathStroke );

    // Create the tile buttons exactly once, not on every redraw.
    // NOTE(review): view setup like this usually belongs in an initializer
    // or layoutSubviews, not in drawRect:.
    if ([tiles_ count] == 0)
    {
        for (int x = 0; x < 16; x++) {
            for (int y = 0; y < 16; y++) {
                Tile *tile = [[Tile alloc] init];
                [tile setFrame:CGRectMake(x * dw, y * dh, dw, dh)];
                [tile addTarget:self action:@selector(clickCell:) forControlEvents:UIControlEventTouchUpInside];
                [tiles_ addObject:tile];
                [self addSubview:tile]; // buttons must be in the view hierarchy
            }
        }
    }
}
// Tile tap handler: lays the mines lazily on the first click (so the first
// tile is never a mine), then opens the tapped tile.
- (void) clickCell : (Tile *) sender
{
if(! tileClosed) [self initTile: sender];
[sender open];
}
Your initWithFrame: method is broken: the return line should just be return self;.
What you're doing at the moment in effect clobbers the tiles_ array, so it ends up as nil, hence attempting to store tiles in it does nothing (and hence they aren't retained).
Your init method is incorrect and you can change it simply to
// Correct initializer pattern: call super once, keep the result in self,
// initialize ivars, return self. (The original snippet was missing the
// semicolon after the [super initWithFrame:frame] call.)
- (id)initWithFrame:(CGRect)frame
{
    self = [super initWithFrame:frame];
    if( self)
    {
        tiles_ = [NSMutableArray array];
        tileClosed = NO;
    }
    return self;
}
and for the buttons to be appearing you are not adding it to the subview the
[self.view addSubview:tile];
is missing.
After transferring a large image from a REST endpoint, I need to divide the image into a number of smaller image tiles.
The initial image is (for instance) 1024x1024, stored in an NSData; I need to create sub-image of size 256x256 (In this case, there will be 16 sub-images).
How would this be done? (I haven't found any articles which even come close, but I assume it must be possible since most image editing software supports image cropping.)
Thanks.
This is the function I use to crop images in some of my project.
// Crops the top-left 256x256 region out of `image` and returns it.
// Fixes to the original: the CGImageRef from CGImageCreateWithImageInRect
// was never released (leak), and the UIGraphics context it opened, drew
// into, and closed was dead code — the returned image came straight from
// the cropped CGImage.
- (UIImage *)cropImage:(UIImage *) image{
    CGRect rect = CGRectMake(0, 0, 256, 256);
    CGImageRef subImageRef = CGImageCreateWithImageInRect(image.CGImage, rect);
    UIImage *smallImg = [UIImage imageWithCGImage:subImageRef];
    CGImageRelease(subImageRef); // Create rule: we own it, so release it
    return smallImg;
}
I think you can find a way from there to call it multiple times to crop your picture into 16 tiles.
Hope this helps
originalImageView is an IBOutlet UIImageView. Its image will be cropped.
#import <QuartzCore/QuartzCore.h>
This is needed for the border around each slice for better understanding.
// Crops originalImageView's image to cropRect (in image pixels) and returns
// the cropped piece as a new UIImage.
-(UIImage*)getCropImage:(CGRect)cropRect
{
    CGImageRef croppedRef = CGImageCreateWithImageInRect([originalImageView.image CGImage], cropRect);
    UIImage *result = [UIImage imageWithCGImage:croppedRef];
    CGImageRelease(croppedRef); // Create rule: release our ownership
    return result;
}
// Slices originalImageView into a row x col grid of UIImageViews, each
// showing the corresponding crop of the image, added on top of self.view.
// flagX/flagY convert view-space coordinates to image-pixel coordinates,
// so this works when the image is larger/smaller than the view. MRC code.
-(void)prepareSlices:(uint)row :(uint)col
{
// Scale factors from view points to image pixels.
float flagX = originalImageView.image.size.width / originalImageView.frame.size.width;
float flagY = originalImageView.image.size.height / originalImageView.frame.size.height;
// Size of one slice, in view points.
float _width = originalImageView.frame.size.width / col;
float _height = originalImageView.frame.size.height / row;
float _posX = 0.0;
float _posY = 0.0;
for (int i = 1; i <= row * col; i++) {
UIImageView *croppedImageVeiw = [[UIImageView alloc] initWithFrame:CGRectMake(_posX, _posY, _width, _height)];
// Crop rect is in image pixels, hence the flagX/flagY scaling.
UIImage *img = [self getCropImage:CGRectMake(_posX * flagX,_posY * flagY, _width * flagX, _height * flagY)];
croppedImageVeiw.image = img;
croppedImageVeiw.layer.borderColor = [[UIColor whiteColor] CGColor];
croppedImageVeiw.layer.borderWidth = 1.0f;
[self.view addSubview:croppedImageVeiw];
[croppedImageVeiw release];
// Advance left-to-right; wrap to the next row after `col` slices.
_posX += _width;
if (i % col == 0) {
_posX = 0;
_posY += _height;
}
}
// Hide the original so only the slices are visible.
originalImageView.alpha = 0.0;
}
Call it like this:
[self prepareSlices:16 :16];
// UIImage category that cuts a sprite sheet into fixed-size tiles,
// left-to-right, top-to-bottom. (Restores the @interface/@implementation/
// @end/@"…" tokens that were mangled to #… in the paste.)
@interface UIImage (Sprite)
- (NSArray *)spritesWithSpriteSheetImage:(UIImage *)image spriteSize:(CGSize)size;
- (NSArray *)spritesWithSpriteSheetImage:(UIImage *)image inRange:(NSRange)range spriteSize:(CGSize)size;
@end

@implementation UIImage (Sprite)

// Convenience: all sprites in the sheet.
-(NSArray *)spritesWithSpriteSheetImage:(UIImage *)image spriteSize:(CGSize)size {
    // NSIntegerMax replaces the original lroundf(MAXFLOAT), whose result
    // overflows long and is undefined behavior.
    return [self spritesWithSpriteSheetImage:self inRange:NSMakeRange(0, NSIntegerMax)
                                  spriteSize:size];
}

// Extracts up to range.length sprites starting at linear index
// range.location, each `size` pixels, returned as UIImages. MRC code.
-(NSArray *)spritesWithSpriteSheetImage:(UIImage *)image
                                inRange:(NSRange)range
                             spriteSize:(CGSize)size {
    if (!image || CGSizeEqualToSize(size, CGSizeZero) || range.length == 0)
        return nil;
    NSLog(@"%lu %lu", (unsigned long)range.location, (unsigned long)range.length);
    CGImageRef spriteSheet = [image CGImage];
    NSMutableArray *tempArray = [[[NSMutableArray alloc] init] autorelease];
    int width = (int)CGImageGetWidth(spriteSheet);
    int height = (int)CGImageGetHeight(spriteSheet);
    int maxI = width / size.width; // sprites per row
    int startI = 0;
    int startJ = 0;
    int length = 0;
    int startPosition = (int)range.location;
    // Extracting initial I & J values from range info
    // NOTE(review): this column/row derivation is convoluted; a plain
    // startI = startPosition % maxI; startJ = startPosition / maxI would be
    // the usual formulation — verify before relying on nonzero locations.
    if (startPosition != 0) {
        for (int k=1; k<=maxI; k++) {
            int d = k * maxI;
            if (d/startPosition == 1) {
                startI = maxI - (d % startPosition);
                break;
            }
            else if (d/startPosition > 1) {
                startI = startPosition;
                break;
            }
            startJ++;
        }
    }
    int positionX = startI * size.width;
    int positionY = startJ * size.height;
    BOOL isReady = NO;
    // Walk the sheet row by row, cropping one sprite per step.
    while (positionY < height) {
        while (positionX < width) {
            CGImageRef sprite = CGImageCreateWithImageInRect(spriteSheet, CGRectMake(positionX, positionY, size.width, size.height));
            [tempArray addObject:[UIImage imageWithCGImage:sprite]];
            CGImageRelease(sprite); // Create rule
            length++;
            if (length == range.length) {
                isReady = YES;
                break;
            }
            positionX += size.width;
        }
        if (isReady)
            break;
        positionX = 0;
        positionY += size.height;
    }
    return [NSArray arrayWithArray:tempArray];
}
@end
So I have a method that loops through an XML document. The document has a number of answer values which are extracted and shown on the screen in the form of UISegmentedControls.
The code i got right now is:
// Recursively walks a TBXML element tree. Every <wcqAnswerValues> element
// becomes a UISegmentedControl (its ';'-separated text supplies the
// segments), stacked vertically in scrollView; each control gets a unique
// sequential tag so -textpopup: can tell them apart. A <formdata> element
// resets the layout state for a new form.
// (Restores the @"…"/@selector tokens mangled to #… in the paste.)
- (void) traverseElement:(TBXMLElement *)element {
    static CGFloat y = 300.0f;           // next control's vertical position
    static CGFloat dynamicHeight = 400.0f; // scroll content height so far
    static int segmentId = 1;            // unique tag for the next control
    do {
        if([[TBXML elementName:element] isEqualToString:@"wcqAnswerValues"]) {
            NSString *segmentItemsStr = [TBXML textForElement:element];
            NSArray *segmentItemsArray = [segmentItemsStr componentsSeparatedByString:@";"];
            answer = [[UISegmentedControl alloc] initWithItems:segmentItemsArray];
            answer.frame = CGRectMake(50, y, 400, 40);
            [answer addTarget:self action:@selector(textpopup:) forControlEvents:UIControlEventValueChanged];
            answer.segmentedControlStyle = UISegmentedControlStyleBar;
            answer.tag = segmentId; // unique id used by the action handler
            [scrollView addSubview:answer];
            scrollView.contentSize = CGSizeMake(768, dynamicHeight);
            [formulierText removeFromSuperview];
            [answer release]; // MRC: scrollView retains it
            y += 40.0f;
            dynamicHeight += 40.0f;
            segmentId++;
        }
        if ([[TBXML elementName:element] isEqualToString:@"formdata"]) {
            // New form: restart layout and tag numbering.
            y = 300.0f;
            dynamicHeight = 400.0f;
            segmentId = 1;
        }
        // if the element has child elements, process them
        if (element->firstChild)
            [self traverseElement:element->firstChild];
        // Obtain next sibling element
    } while ((element = element->nextSibling));
}
// Value-changed handler shared by every generated UISegmentedControl;
// the sender's tag (assigned in -traverseElement:) identifies which one fired.
-(void)textpopup:(UISegmentedControl *)sender {
if (sender.tag == 1) {
// if the tag of the sender is 1, then do something.
}
}
What i want to achieve is this:
Each UISegmentedControl that is being printed on the screen has to have a unique number or id so i can separately assign actions to them. What is the best way to achieve this?
EDIT:
Fixed it. I will update my code.