Graphics in iOS Game Rendering Incorrectly - objective-c

I'm trying to follow this tutorial for writing a Tiny Wings type game.
I'm stuck at the end of it. Instead of getting the rolling hills, I'm getting a really bad looking triangle drawing. I've posted some screenshots below.
I'm at the point in the tutorial where it says to uncomment some code to get it to display properly in the debugger. I've done that and it's still doing this. I'm wondering if this is just the simulator or if I messed up somewhere. Whatever is happening, it's rendering periodically, so I think the cosine function must be working in terms of the math, but that still doesn't explain this behavior. Basically, I'm stumped.
Here's the code I'm using:
HelloWorldLayer.h
#import "cocos2d.h"
#import "Terrain.h"
/*#import "Box2D.h"
#import "GLES-Render.h"*/
// HelloWorldLayer
@interface HelloWorldLayer : CCLayer
{
CCSprite *_background;
Terrain *_terrain;
/*b2World* world;
GLESDebugDraw *m_debugDraw;*/
}
// returns a CCScene that contains the HelloWorldLayer as the only child
+(CCScene *) scene;
// adds a new sprite at a given coordinate
//-(void) addNewSpriteWithCoords:(CGPoint)p;
@end
HelloWorldLayer.m
#import "HelloWorldLayer.h"
//Pixel to metres ratio. Box2D uses metres as the unit for measurement.
//This ratio defines how many pixels correspond to 1 Box2D "metre"
//Box2D is optimized for objects of 1x1 metre therefore it makes sense
//to define the ratio so that your most common object type is 1x1 metre.
#define PTM_RATIO 32
// enums that will be used as tags
/*enum {
kTagTileMap = 1,
kTagBatchNode = 1,
kTagAnimation1 = 1,
};*/
// HelloWorldLayer implementation
@implementation HelloWorldLayer
+(CCScene *) scene
{
// 'scene' is an autorelease object.
CCScene *scene = [CCScene node];
// 'layer' is an autorelease object.
HelloWorldLayer *layer = [HelloWorldLayer node];
// add layer as a child to scene
[scene addChild: layer];
// return the scene
return scene;
}
-(CCSprite *)spriteWithColor:(ccColor4F)bgColor textureSize:(float)textureSize {
// 1: Create new CCRenderTexture
CCRenderTexture *rt = [CCRenderTexture renderTextureWithWidth:textureSize height:textureSize];
// 2: Call CCRenderTexture:begin
[rt beginWithClear:bgColor.r g:bgColor.g b:bgColor.b a:bgColor.a];
//Add Gradient to image
/*The basic idea is we’ll draw a black rectangle on top of the texture, but it will be completely transparent up top, and opaque at the bottom. This will keep the top untouched, but gradually darken the image going down*/
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
float gradientAlpha;
CGPoint vertices[4];
ccColor4F colors[4];
int nVertices = 0;
vertices[nVertices] = CGPointMake(0, 0);
colors[nVertices++] = (ccColor4F){0, 0, 0, 0 };
vertices[nVertices] = CGPointMake(textureSize, 0);
colors[nVertices++] = (ccColor4F){0, 0, 0, 0};
vertices[nVertices] = CGPointMake(0, textureSize);
colors[nVertices++] = (ccColor4F){0, 0, 0, gradientAlpha};
vertices[nVertices] = CGPointMake(textureSize, textureSize);
colors[nVertices++] = (ccColor4F){0, 0, 0, gradientAlpha};
glVertexPointer(2, GL_FLOAT, 0, vertices);
glColorPointer(4, GL_FLOAT, 0, colors);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei)nVertices);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
// 3: Draw into the texture
// We'll add this later
CCSprite *noise = [CCSprite spriteWithFile:@"Noise.png"];
[noise setBlendFunc:(ccBlendFunc){GL_DST_COLOR, GL_ZERO}];
noise.position = ccp(textureSize/2, textureSize/2);
[noise visit];
// 4: Call CCRenderTexture:end
[rt end];
// 5: Create a new Sprite from the texture
return [CCSprite spriteWithTexture:rt.sprite.texture];
}
-(CCSprite *)stripedSpriteWithColor1:(ccColor4F)c1 color2:(ccColor4F)c2
textureSize:(float)textureSize stripes:(int)nStripes {
// 1:Create new CCRenderTexture
CCRenderTexture *rt = [CCRenderTexture renderTextureWithWidth:textureSize height:textureSize];
// 2: Call CCRenderTexture:begin
[rt beginWithClear:c1.r g:c1.g b:c1.b a:c1.a];
// 3: Draw into the texture
// Layer 1: Stripes
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
CGPoint vertices[nStripes*6];
int nVertices = 0;
float x1 = -textureSize;
float x2;
float y1 = textureSize;
float y2 = 0;
float dx = textureSize/ nStripes*2;
float stripeWidth = dx/2;
for (int i = 0; i<nStripes; i++) {
x2 = x1 +textureSize;
vertices[nVertices++]=CGPointMake(x1, y1);
vertices[nVertices++]=CGPointMake(x1+stripeWidth, y1);
vertices[nVertices++]=CGPointMake(x2, y2);
vertices[nVertices++]= vertices[nVertices-2];
vertices[nVertices++]= vertices[nVertices-2];
vertices[nVertices++]=CGPointMake(x2+stripeWidth, y2);
x1 += dx;
}
glColor4f(c2.r, c2.g, c2.b, c2.a);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glDrawArrays(GL_TRIANGLES, 0, (GLsizei)nVertices);
// layer 2: gradient
glEnableClientState(GL_COLOR_ARRAY);
float gradientAlpha = 0.7;
ccColor4F colors[4];
nVertices = 0;
vertices[nVertices] = CGPointMake(0, 0);
colors[nVertices++] = (ccColor4F){0, 0, 0, 0 };
vertices[nVertices] = CGPointMake(textureSize, 0);
colors[nVertices++] = (ccColor4F){0, 0, 0, 0};
vertices[nVertices] = CGPointMake(0, textureSize);
colors[nVertices++] = (ccColor4F){0, 0, 0, gradientAlpha};
vertices[nVertices] = CGPointMake(textureSize, textureSize);
colors[nVertices++] = (ccColor4F){0, 0, 0, gradientAlpha};
glVertexPointer(2, GL_FLOAT, 0, vertices);
glColorPointer(4, GL_FLOAT, 0, colors);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei)nVertices);
// layer 3: top highlight
float borderWidth = textureSize/16;
float borderAlpha = 0.3f;
nVertices = 0;
vertices[nVertices] = CGPointMake(0, 0);
colors[nVertices++] = (ccColor4F){1,1,1,borderAlpha};
vertices[nVertices] = CGPointMake(textureSize, 0);
colors[nVertices++] = (ccColor4F){1,1,1,borderAlpha};
vertices[nVertices] = CGPointMake(0, borderWidth);
colors[nVertices++] = (ccColor4F){0, 0, 0, 0};
vertices[nVertices] = CGPointMake(textureSize, borderWidth);
colors[nVertices++] = (ccColor4F){0, 0, 0, 0};
glVertexPointer(2, GL_FLOAT, 0, vertices);
glColorPointer(4, GL_FLOAT, 0, colors);
glBlendFunc(GL_DST_COLOR, GL_ONE_MINUS_SRC_ALPHA);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei)nVertices);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
// Layer 2: Noise
CCSprite *noise = [CCSprite spriteWithFile:@"Noise.png"];
[noise setBlendFunc:(ccBlendFunc){GL_DST_COLOR, GL_ZERO}];
noise.position = ccp(textureSize/2, textureSize/2);
[noise visit];
// 4: Call CCRenderTexture:end
[rt end];
// 5: Create a new Sprite from the texture
return [CCSprite spriteWithTexture:rt.sprite.texture];
}
-(ccColor4F)randomBrightColor {
while (true) {
float requiredBrightness = 192;
ccColor4B randomColor = ccc4(arc4random() % 255,
arc4random() % 255,
arc4random() % 255,
255);
if (randomColor.r > requiredBrightness ||
randomColor.g > requiredBrightness ||
randomColor.b > requiredBrightness) {
return ccc4FFromccc4B(randomColor);
}
}
}
-(void)genBackground {
[_background removeFromParentAndCleanup:YES];
ccColor4F bgColor = [self randomBrightColor];
/*new code*/
//ccColor4F color2 = [self randomBrightColor];
/*new code*/
_background = [self spriteWithColor:bgColor textureSize:512];
/*new code*/
// int nStripes = ((arc4random() % 4) + 1) * 2;
//_background = [self stripedSpriteWithColor1:bgColor color2:color2 textureSize:512 stripes:nStripes];
//self.scale = 0.5;
/*new code*/
CGSize winSize = [CCDirector sharedDirector].winSize;
_background.position = ccp(winSize.width/2,winSize.height/2);
ccTexParams tp = {GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_REPEAT};
[_background.texture setTexParameters:&tp];
[self addChild:_background z:-1];
ccColor4F color3 = [self randomBrightColor];
ccColor4F color4 = [self randomBrightColor];
CCSprite *stripes = [self stripedSpriteWithColor1:color3 color2:color4 textureSize:512 stripes:4];
ccTexParams tp2 = {GL_LINEAR, GL_LINEAR, GL_REPEAT, GL_CLAMP_TO_EDGE};
[stripes.texture setTexParameters:&tp2];
_terrain.stripes = stripes;
/*The important part is the texture parameters:
GL_LINEAR is a fancy way of saying “when displaying the texture at a smaller or larger scale than the original size, take a weighted average of the nearby pixels.”
GL_REPEAT is a fancy way of saying “if you try to index a texture at a coordinate outside the texture bounds, put what would be there if the texture were to continuously tile.”*/
}
// on "init" you need to initialize your instance
-(id) init
{
// always call "super" init
// Apple recommends to re-assign "self" with the "super" return value
if( (self=[super init])) {
_terrain = [Terrain node];
[self addChild:_terrain z:1];
[self genBackground];
self.isTouchEnabled = YES;
[self scheduleUpdate];
}
self.scale = 1.0;
return self;
}
-(void)update:(ccTime)dt {
float PIXELS_PER_SECOND = 100;
static float offset = 0;
offset += PIXELS_PER_SECOND * dt;
CGSize textureSize = _background.textureRect.size;
[_background setTextureRect:CGRectMake(offset, 0, textureSize.width, textureSize.height)];
[_terrain setOffsetX:offset];
}
-(void)ccTouchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
[self genBackground];
}
// on "dealloc" you need to release all your retained objects
- (void) dealloc
{
// in case you have something to dealloc, do it in this method
/*delete world;
world = NULL;
delete m_debugDraw;*/
// don't forget to call "super dealloc"
[super dealloc];
}
@end
Terrain.h
#import "CCNode.h"
#import "cocos2d.h"
@class HelloWorldLayer;
#define kMaxHillKeyPoints 1000
#define kHillSegmentWidth 10
#define kMaxHillVertices 4000
#define kMaxBorderVertices 800
@interface Terrain : CCNode {
int _offsetX;
CGPoint _hillKeyPoints[kMaxHillKeyPoints];
CCSprite *_stripes;
int _fromKeyPointI;
int _toKeyPointI;
int _nHillVertices;
CGPoint _hillVertices[kMaxHillVertices];
CGPoint _hillTexCoords[kMaxHillVertices];
int _nBorderVertices;
CGPoint _borderVertices[kMaxBorderVertices];
}
@property (retain) CCSprite *stripes;
-(void)setOffsetX:(float)newOffsetX;
@end
Terrain.m
#import "Terrain.h"
#import "HelloWorldLayer.h"
@implementation Terrain
@synthesize stripes = _stripes;
-(void)generateHills {
/*
The strategy in this algorithm is the following:
Increment x-axis in the range of 160 + a random number between 0-40
Increment y-axis in the range of 60 + a random number between 0-40
Except: reverse the y-axis offset every other time.
Don’t let the y value get too close to the top or bottom (paddingTop, paddingBottom)
Start offscreen to the left, and hardcode the second point to (0, winSize.height/2), so there’s a hill coming up from the left offscreen.*/
CGSize winSize = [CCDirector sharedDirector].winSize;
float minDX = 160;
float minDY = 60;
int rangeDX = 80;
int rangeDY= 40;
float x = -minDX;
float y = winSize.height/2 - minDY;
float dy, ny;
float sign = 1;// +1 - going up, -1 - going down
float paddingTop = 20;
float paddingBottom = 20;
for (int i=0; i<kMaxHillKeyPoints; i++) {
_hillKeyPoints[i] = CGPointMake(x, y);
if (i == 0) {
x = 0;
y = winSize.height/2;
} else {
x+=rand()%rangeDX+minDX;
while (true) {
dy = rand()%rangeDY + minDY;
ny = y + dy*sign;
if (ny < winSize.height - paddingTop && ny > paddingBottom) {
break;
}
}
y = ny;
}
sign *= -1;
}
/*float x = 0;
float y = winSize.width/2;
for (int i = 0; i<kMaxHillKeyPoints; ++i) {
_hillKeyPoints[i] = CGPointMake(x, y);
x += winSize.width/2;
y = random() % (int) winSize.height;
}*/
}
-(void)resetHillVertices {
CGSize winSize = [CCDirector sharedDirector].winSize;
static int prevFromKeyPointI = -1;
static int prevToKeyPointI = -1;
// key points interval for drawing
while (_hillKeyPoints[_fromKeyPointI+1].x < _offsetX-winSize.width/8/self.scale) {
_fromKeyPointI++;
}
while (_hillKeyPoints[_toKeyPointI].x < _offsetX+winSize.width*9/8/self.scale) {
_toKeyPointI++;
}
if (prevFromKeyPointI != _fromKeyPointI || prevToKeyPointI != _toKeyPointI) {
// vertices for visible area
_nHillVertices = 0;
_nBorderVertices =0;
CGPoint p0, p1, pt0, pt1;
p0 = _hillKeyPoints[_fromKeyPointI];
for (int i = _fromKeyPointI+1; i<_toKeyPointI+1; i++) {
p1 = _hillKeyPoints[i];
// triangle strip between p0 and p1
int hSegments = floorf((p1.x-p0.x)/kHillSegmentWidth);
float dx = (p1.x - p0.x)/hSegments;
float da = M_PI / hSegments;
float ymid = (p0.y + p1.y)/2;
float ampl = (p0.y - p1.y)/2;
pt0 = p0;
_borderVertices[_nBorderVertices++] = pt0;
for (int j=1; j<hSegments+1; j++) {
pt1.x = p0.x + j* dx;
pt1.y = ymid +ampl * cosf(da*j);
_borderVertices[_nBorderVertices++] = pt1;
_hillVertices[_nHillVertices] = CGPointMake(pt0.x, 0);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt0.x/512, 1.0f);
_hillVertices[_nHillVertices] = CGPointMake(pt1.x, 0);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt1.x/512, 1.0f);
_hillVertices[_nHillVertices] = CGPointMake(pt0.x, pt0.y);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt0.x/512, 0);
_hillVertices[_nHillVertices] = CGPointMake(pt1.x, pt1.y);
_hillVertices[_nHillVertices++] = CGPointMake(pt1.x/512, 0);
pt0 = pt1;
}
p0 = p1;
}
prevFromKeyPointI = _fromKeyPointI;
prevToKeyPointI = _toKeyPointI;
}
}
-(id)init {
if ((self = [super init])) {
[self generateHills];
}
[self resetHillVertices];
return self;
}
-(void) draw {
glBindTexture(GL_TEXTURE_2D, _stripes.texture.name);
glDisableClientState(GL_COLOR_ARRAY);
glColor4f(1, 1, 1, 1);
glVertexPointer(2, GL_FLOAT, 0, _hillVertices);
glTexCoordPointer(2, GL_FLOAT, 0, _hillTexCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei)_nHillVertices);
// glEnableClientState(GL_COLOR_ARRAY);
for (int i= MAX(_fromKeyPointI, 1); i <= _toKeyPointI; ++i) {
glColor4f(1.0, 0, 0, 1.0);
//ccDrawLine(_hillKeyPoints[i-1], _hillKeyPoints[i]);
glColor4f(1.0, 1.0, 1.0, 1.0);
CGPoint p0 = _hillKeyPoints[i-1];
CGPoint p1 = _hillKeyPoints[i];
int hSegments = floorf((p1.x-p0.x)/kHillSegmentWidth);
float dx = (p1.x-p0.x)/hSegments;
float da = M_PI /hSegments;
float ymid = (p0.y + p1.y)/2;
float ampl = (p0.y - p1.y)/2;
CGPoint pt0, pt1;
pt0 = p0;
for (int j= 0; j<hSegments+1; ++j) {
pt1.x = p0.x +j*dx;
pt1.y = ymid + ampl * cosf(da*j);
//ccDrawLine(pt0, pt1);
pt0 = pt1;
}
}
}
-(void)setOffsetX:(float)newOffsetX {
_offsetX = newOffsetX;
self.position = CGPointMake(-_offsetX*self.scale, 0);
[self resetHillVertices];
}
-(void) dealloc {
[_stripes release];
_stripes = NULL;
[super dealloc];
}
@end

Just a hunch: are you using cocos2d 2.0, perhaps?
Because the code you posted uses OpenGL ES 1.1 commands, and that won't work (correctly) when you're using cocos2d 2.x. If you are using cocos2d 2.0, try again with v1.x.
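For reference (this is not from the tutorial): under cocos2d 2.x the fixed-function calls above silently do nothing, because all drawing goes through the programmable pipeline. A rough sketch of what the Terrain draw would look like in 2.x, assuming the node's shaderProgram is set in init to the position/texture program (CC_NODE_DRAW_SETUP, ccGLBindTexture2D, kCCShader_PositionTexture and friends are cocos2d 2.x names, not anything in the 1.x code above):
// Sketch of a cocos2d 2.x-style draw, for comparison only.
// In init: self.shaderProgram = [[CCShaderCache sharedShaderCache] programForKey:kCCShader_PositionTexture];
-(void) draw {
    CC_NODE_DRAW_SETUP();                 // binds this node's shader and matrices
    ccGLBindTexture2D(_stripes.texture.name);
    ccGLEnableVertexAttribs(kCCVertexAttribFlag_Position | kCCVertexAttribFlag_TexCoords);
    glVertexAttribPointer(kCCVertexAttribPosition, 2, GL_FLOAT, GL_FALSE, 0, _hillVertices);
    glVertexAttribPointer(kCCVertexAttribTexCoords, 2, GL_FLOAT, GL_FALSE, 0, _hillTexCoords);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei)_nHillVertices);
}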

Figured out the problem.
_hillVertices[_nHillVertices] = CGPointMake(pt0.x, 0);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt0.x/512, 1.0f);
_hillVertices[_nHillVertices] = CGPointMake(pt1.x, 0);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt1.x/512, 1.0f);
_hillVertices[_nHillVertices] = CGPointMake(pt0.x, pt0.y);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt0.x/512, 0);
_hillVertices[_nHillVertices] = CGPointMake(pt1.x, pt1.y);
_hillVertices[_nHillVertices++] = CGPointMake(pt1.x/512, 0); // This line should be modifying the _hillTexCoords array
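In other words, that last line writes a texture coordinate into the vertex array instead of into _hillTexCoords, corrupting every fourth vertex. The corrected pair of lines is:
_hillVertices[_nHillVertices] = CGPointMake(pt1.x, pt1.y);
_hillTexCoords[_nHillVertices++] = CGPointMake(pt1.x/512, 0);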

Related

Rotate already detected rectangle with OpenCV Swift

I am working on document edge detection using OpenCV in my iOS project and have successfully detected the edges of the document.
Now I want to rotate the image along with the detected rectangle. I have referred to this
GitHub project to detect the edges.
For that, I first rotated the image and tried to re-detect the edges by again finding the largest rectangle in the image. But unfortunately, it is not giving me the exact rectangle.
Can somebody suggest how to detect the rotated document's edges again, or should I instead rotate the detected rectangle along with the image?
Before Rotation Image
After Rotation Image
+(NSMutableArray *) getLargestSquarePoints: (UIImage *) image : (CGSize) size {
Mat imageMat;
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
imageMat = cvMat;
cv::resize(imageMat, imageMat, cvSize(size.width, size.height));
// UIImageToMat(image, imageMat);
std::vector<std::vector<cv::Point> >rectangle;
std::vector<cv::Point> largestRectangle;
getRectangles(imageMat, rectangle);
getlargestRectangle(rectangle, largestRectangle);
if (largestRectangle.size() == 4)
{
// Thanks to: https://stackoverflow.com/questions/20395547/sorting-an-array-of-x-and-y-vertice-points-ios-objective-c/20399468#20399468
NSArray *points = [NSArray array];
points = @[
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[0].x, (CGFloat)largestRectangle[0].y}],
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[1].x, (CGFloat)largestRectangle[1].y}],
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[2].x, (CGFloat)largestRectangle[2].y}],
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[3].x, (CGFloat)largestRectangle[3].y}] ];
CGPoint min = [points[0] CGPointValue];
CGPoint max = min;
for (NSValue *value in points) {
CGPoint point = [value CGPointValue];
min.x = fminf(point.x, min.x);
min.y = fminf(point.y, min.y);
max.x = fmaxf(point.x, max.x);
max.y = fmaxf(point.y, max.y);
}
CGPoint center = {
0.5f * (min.x + max.x),
0.5f * (min.y + max.y),
};
NSLog(#"center: %#", NSStringFromCGPoint(center));
NSNumber *(^angleFromPoint)(id) = ^(NSValue *value){
CGPoint point = [value CGPointValue];
CGFloat theta = atan2f(point.y - center.y, point.x - center.x);
CGFloat angle = fmodf(M_PI - M_PI_4 + theta, 2 * M_PI);
return @(angle);
};
NSArray *sortedPoints = [points sortedArrayUsingComparator:^NSComparisonResult(id a, id b) {
return [angleFromPoint(a) compare:angleFromPoint(b)];
}];
NSLog(#"sorted points: %#", sortedPoints);
NSMutableArray *squarePoints = [[NSMutableArray alloc] init];
[squarePoints addObject: [sortedPoints objectAtIndex:0]];
[squarePoints addObject: [sortedPoints objectAtIndex:1]];
[squarePoints addObject: [sortedPoints objectAtIndex:2]];
[squarePoints addObject: [sortedPoints objectAtIndex:3]];
imageMat.release();
return squarePoints;
}
else{
imageMat.release();
return nil;
}
}
void getRectangles(cv::Mat& image, std::vector<std::vector<cv::Point>>&rectangles) {
// blur will enhance edge detection
cv::Mat blurred(image);
GaussianBlur(image, blurred, cvSize(11,11), 0);
cv::Mat gray0(blurred.size(), CV_8U), gray;
std::vector<std::vector<cv::Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Canny(gray0, gray, 0, 50, 5);
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
std::vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(cv::Mat(approx))) > 1000 &&
isContourConvex(cv::Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
rectangles.push_back(approx);
}
}
}
}
}
void getlargestRectangle(const std::vector<std::vector<cv::Point> >&rectangles, std::vector<cv::Point>& largestRectangle)
{
if (!rectangles.size())
{
return;
}
double maxArea = 0;
int index = 0;
for (size_t i = 0; i < rectangles.size(); i++)
{
cv::Rect rectangle = boundingRect(cv::Mat(rectangles[i]));
double area = rectangle.width * rectangle.height;
if (maxArea < area)
{
maxArea = area;
index = i;
}
}
largestRectangle = rectangles[index];
}
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
+(UIImage *) getTransformedImage: (CGFloat) newWidth : (CGFloat) newHeight : (UIImage *) origImage : (CGPoint [4]) corners : (CGSize) size {
cv::Mat imageMat;
CGColorSpaceRef colorSpace = CGImageGetColorSpace(origImage.CGImage);
CGFloat cols = size.width;
CGFloat rows = size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,
// Pointer to backing data
cols,
// Width of bitmap
rows,
// Height of bitmap
8,
// Bits per component
cvMat.step[0],
// Bytes per row
colorSpace,
// Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), origImage.CGImage);
CGContextRelease(contextRef);
imageMat = cvMat;
cv::Mat newImageMat = cv::Mat( cvSize(newWidth,newHeight), CV_8UC4);
cv::Point2f src[4], dst[4];
src[0].x = corners[0].x;
src[0].y = corners[0].y;
src[1].x = corners[1].x;
src[1].y = corners[1].y;
src[2].x = corners[2].x;
src[2].y = corners[2].y;
src[3].x = corners[3].x;
src[3].y = corners[3].y;
dst[0].x = 0;
dst[0].y = -10;
dst[1].x = newWidth - 1;
dst[1].y = -10;
dst[2].x = newWidth - 1;
dst[2].y = newHeight + 1;
dst[3].x = 0;
dst[3].y = newHeight + 1;
dst[0].x = 0;
dst[0].y = 0;
dst[1].x = newWidth - 1;
dst[1].y = 0;
dst[2].x = newWidth - 1;
dst[2].y = newHeight - 1;
dst[3].x = 0;
dst[3].y = newHeight - 1;
cv::warpPerspective(imageMat, newImageMat, cv::getPerspectiveTransform(src, dst), cvSize(newWidth, newHeight));
//Transform to UIImage
NSData *data = [NSData dataWithBytes:newImageMat.data length:newImageMat.elemSize() * newImageMat.total()];
CGColorSpaceRef colorSpace2;
if (newImageMat.elemSize() == 1) {
colorSpace2 = CGColorSpaceCreateDeviceGray();
} else {
colorSpace2 = CGColorSpaceCreateDeviceGray();
// colorSpace2 = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGFloat width = newImageMat.cols;
CGFloat height = newImageMat.rows;
CGImageRef imageRef = CGImageCreate(width, height, 8, 8 * newImageMat.elemSize(),
newImageMat.step[0],
colorSpace2,
kCGImageAlphaNone | kCGBitmapByteOrderDefault, provider,
NULL, false, kCGRenderingIntentDefault);
UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace2);
return image;
}
If you use cv::minAreaRect (cv2.minAreaRect in Python), it gives the best enclosing rotated rectangle for a contour together with its angle, so you can rotate back.
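As a sketch of that suggestion (minAreaRect, getRotationMatrix2D, and warpAffine are standard OpenCV calls; largestRectangle is the contour detected by the code above, and the angle convention of minAreaRect is an assumption worth verifying against your OpenCV version):
// Sketch: deskew using the angle of the minimum-area enclosing rectangle.
cv::RotatedRect box = cv::minAreaRect(cv::Mat(largestRectangle));
double angle = box.angle;
if (angle < -45.0) angle += 90.0;  // minAreaRect reports angles in (-90, 0]
cv::Mat rot = cv::getRotationMatrix2D(box.center, angle, 1.0);
cv::Mat rotated;
cv::warpAffine(imageMat, rotated, rot, imageMat.size(), cv::INTER_CUBIC);
// The edges can then be re-detected on `rotated` with getRectangles/getlargestRectangle.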

Memory Leak in Objective C function with ARC

Here is the function, from the library MGSpotyViewController, but I am asking what to watch out for in this kind of situation.
I am used to C++ and maintaining my own objects, but I can't find the problem here. I tried very basic pieces of code with UIGraphicsBeginImageContextWithOptions and memory still increased slightly, but I wasn't sure whether I had located the problem or whether the autoreleased objects simply hadn't been released yet.
To test the following function I did:
- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
{
UIImage *image = [UIImage imageNamed:@"Dude.png"];
for (int i = 0; i < 1000; i++) {
@autoreleasepool {
// doesn't matter if I move the first line here.
UIImage *blurredImage = [image applyBlurWithRadius:20.0f
tintColor:nil
saturationDeltaFactor:1.0f
maskImage:nil];
}
}
return YES;
}
The result was like this:
What is/might be the problem?
UIImage+ImageEffects.m
- (UIImage *)applyBlurWithRadius:(CGFloat)blurRadius tintColor:(UIColor *)tintColor saturationDeltaFactor:(CGFloat)saturationDeltaFactor maskImage:(UIImage *)maskImage
{
// Check pre-conditions.
if(self.size.width < 1 || self.size.height < 1) {
NSLog (#"*** error: invalid size: (%.2f x %.2f). Both dimensions must be >= 1: %#", self.size.width, self.size.height, self);
return nil;
}
if(!self.CGImage) {
NSLog (#"*** error: image must be backed by a CGImage: %#", self);
return nil;
}
if(maskImage && !maskImage.CGImage) {
NSLog (#"*** error: maskImage must be backed by a CGImage: %#", maskImage);
return nil;
}
CGRect imageRect = { CGPointZero, self.size };
UIImage *effectImage = self;
CGFloat scale = 0.5f;
scale = [[UIScreen mainScreen] scale];
BOOL hasBlur = blurRadius > __FLT_EPSILON__;
BOOL hasSaturationChange = fabs(saturationDeltaFactor - 1.) > __FLT_EPSILON__;
if(hasBlur || hasSaturationChange) {
UIGraphicsBeginImageContextWithOptions(self.size, NO, scale);
CGContextRef effectInContext = UIGraphicsGetCurrentContext();
CGContextScaleCTM(effectInContext, 1.0, -1.0);
CGContextTranslateCTM(effectInContext, 0, -self.size.height);
CGContextDrawImage(effectInContext, imageRect, self.CGImage);
vImage_Buffer effectInBuffer;
effectInBuffer.data = CGBitmapContextGetData(effectInContext);
effectInBuffer.width = CGBitmapContextGetWidth(effectInContext);
effectInBuffer.height = CGBitmapContextGetHeight(effectInContext);
effectInBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectInContext);
UIGraphicsBeginImageContextWithOptions(self.size, NO, scale);
CGContextRef effectOutContext = UIGraphicsGetCurrentContext();
vImage_Buffer effectOutBuffer;
effectOutBuffer.data = CGBitmapContextGetData(effectOutContext);
effectOutBuffer.width = CGBitmapContextGetWidth(effectOutContext);
effectOutBuffer.height = CGBitmapContextGetHeight(effectOutContext);
effectOutBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectOutContext);
if(hasBlur) {
// A description of how to compute the box kernel width from the Gaussian
// radius (aka standard deviation) appears in the SVG spec:
// http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement
//
// For larger values of 's' (s >= 2.0), an approximation can be used: Three
// successive box-blurs build a piece-wise quadratic convolution kernel, which
// approximates the Gaussian kernel to within roughly 3%.
//
// let d = floor(s * 3*sqrt(2*pi)/4 + 0.5)
//
// ... if d is odd, use three box-blurs of size 'd', centered on the output pixel.
//
CGFloat inputRadius = blurRadius * scale;
NSUInteger radius = floor(inputRadius * 3. * sqrt(2 * M_PI) / 4 + 0.5);
if(radius % 2 != 1) {
radius += 1; // force radius to be odd so that the three box-blur methodology works.
}
vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (int)radius, (int)radius, 0, kvImageEdgeExtend);
vImageBoxConvolve_ARGB8888(&effectOutBuffer, &effectInBuffer, NULL, 0, 0, (int)radius, (int)radius, 0, kvImageEdgeExtend);
vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (int)radius, (int)radius, 0, kvImageEdgeExtend);
}
BOOL effectImageBuffersAreSwapped = NO;
if(hasSaturationChange) {
CGFloat s = saturationDeltaFactor;
CGFloat floatingPointSaturationMatrix[] = {
0.0722 + 0.9278 * s, 0.0722 - 0.0722 * s, 0.0722 - 0.0722 * s, 0,
0.7152 - 0.7152 * s, 0.7152 + 0.2848 * s, 0.7152 - 0.7152 * s, 0,
0.2126 - 0.2126 * s, 0.2126 - 0.2126 * s, 0.2126 + 0.7873 * s, 0,
0, 0, 0, 1,
};
const int32_t divisor = 256;
NSUInteger matrixSize = sizeof(floatingPointSaturationMatrix)/sizeof(floatingPointSaturationMatrix[0]);
int16_t saturationMatrix[matrixSize];
for (NSUInteger i = 0; i < matrixSize; ++i) {
saturationMatrix[i] = (int16_t)roundf(floatingPointSaturationMatrix[i] * divisor);
}
if(hasBlur) {
vImageMatrixMultiply_ARGB8888(&effectOutBuffer, &effectInBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
effectImageBuffersAreSwapped = YES;
}
else {
vImageMatrixMultiply_ARGB8888(&effectInBuffer, &effectOutBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
}
}
if(!effectImageBuffersAreSwapped)
effectImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
if(effectImageBuffersAreSwapped)
effectImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
}
// Set up output context.
UIGraphicsBeginImageContextWithOptions(self.size, NO, scale);
CGContextRef outputContext = UIGraphicsGetCurrentContext();
CGContextScaleCTM(outputContext, 1.0, -1.0);
CGContextTranslateCTM(outputContext, 0, -self.size.height);
// Draw base image.
CGContextDrawImage(outputContext, imageRect, self.CGImage);
// Draw effect image.
if(hasBlur) {
CGContextSaveGState(outputContext);
if(maskImage) {
CGContextClipToMask(outputContext, imageRect, maskImage.CGImage);
}
CGContextDrawImage(outputContext, imageRect, effectImage.CGImage);
CGContextRestoreGState(outputContext);
}
// Add in color tint.
if(tintColor) {
CGContextSaveGState(outputContext);
CGContextSetFillColorWithColor(outputContext, tintColor.CGColor);
CGContextFillRect(outputContext, imageRect);
CGContextRestoreGState(outputContext);
}
// Output image is ready.
UIImage *outputImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return outputImage;
}
Wow.
It turns out that if the input UIImage to this function has a scale different from [[UIScreen mainScreen] scale] (because I set the scale in UIGraphicsBeginImageContextWithOptions), it goes crazy.
I am not sure how or why, and I am not even sure this qualifies as a general problem, but I am going to accept this as the answer for now.
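If that is indeed the cause, a workaround would be to normalize the input image to the screen scale before blurring. A minimal sketch (NormalizedImage is a hypothetical helper, not part of MGSpotyViewController or the image-effects category):
// Sketch: re-render a UIImage at the main screen's scale before blurring.
static UIImage *NormalizedImage(UIImage *image) {
    CGFloat screenScale = [[UIScreen mainScreen] scale];
    if (image.scale == screenScale) return image;
    UIGraphicsBeginImageContextWithOptions(image.size, NO, screenScale);
    [image drawInRect:(CGRect){CGPointZero, image.size}];
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}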

Trying to draw a simple line loop

I'm going crazy. All I want to do is draw a simple line loop. But all I get is a diagonal line from the origin out to (-0.5, 0.0, -0.5). What am I doing wrong?
@interface Frustum : NSObject {
GLuint _vertexArray;
GLuint _vertexBuffer;
GLfloat left_side[4][3];
}
- (id) init;
- (GLuint) getVertexArray;
- (void) render;
@end
@implementation Frustum
- (id) init {
if ((self = [super init])) {
left_side[0][0] = -0.5f; left_side[0][1] = 0.0f; left_side[0][2] = -0.5f;
left_side[1][0] = 0.5f; left_side[1][1] = 0.0f; left_side[1][2] = -0.5f;
left_side[2][0] = 0.5f; left_side[2][1] = 0.0f; left_side[2][2] = -0.5f;
left_side[3][0] = -0.5f; left_side[3][1] = 0.0f; left_side[3][2] = -0.5f;
glGenVertexArraysOES(1, &_vertexArray);
glBindVertexArrayOES(_vertexArray);
glGenBuffers(1, &_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
// SOMETHING IS WRONG IN HERE, BUT I DON'T KNOW WHAT!!!!!
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat)*12, left_side, GL_STATIC_DRAW);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, sizeof(left_side), NULL);
glBindVertexArrayOES(0);
}
return self;
}
- (void) render {
glLineWidth(2.0f);
glDrawArrays(GL_LINE_LOOP, 0, 4);
}
Try changing your y or z values so you form an actual loop:
(-0.5, 0, 0)
(0.5, 0, 0)
(0.5, 0.5, 0)
(-0.5, 0.5, 0)
will form a line loop. You just have the same y everywhere, so that's a line.
Remember, the default camera will be looking down the -z axis, so it won't see changes in z (unless you've rotated the camera).
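Applied to the init above, the vertex data would become something like this (a sketch using those coordinates; the stride fix on the last line is my own addition, since sizeof(left_side) is the size of the whole array, not of one vertex):
// Sketch: four distinct corners so GL_LINE_LOOP traces a square in the x/y plane.
left_side[0][0] = -0.5f; left_side[0][1] = 0.0f; left_side[0][2] = 0.0f;
left_side[1][0] =  0.5f; left_side[1][1] = 0.0f; left_side[1][2] = 0.0f;
left_side[2][0] =  0.5f; left_side[2][1] = 0.5f; left_side[2][2] = 0.0f;
left_side[3][0] = -0.5f; left_side[3][1] = 0.5f; left_side[3][2] = 0.0f;
// Tightly packed data: pass 0 (or 3 * sizeof(GLfloat)) as the stride.
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 0, NULL);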

Create a sub-image from a larger image

After transferring a large image from a REST endpoint, I need to divide the image into a number of smaller image tiles.
The initial image is (for instance) 1024x1024, stored in an NSData; I need to create sub-images of size 256x256 (in this case, there will be 16 sub-images).
How would this be done? (I haven't found any articles that even come close, but I assume it must be possible, since most image editing software supports image cropping.)
Thanks.
This is the function I use to crop images in some of my project.
- (UIImage *)cropImage:(UIImage *)image {
CGRect rect = CGRectMake(0, 0, 256, 256);
CGImageRef subImageRef = CGImageCreateWithImageInRect(image.CGImage, rect);
UIImage *smallImg = [UIImage imageWithCGImage:subImageRef];
CGImageRelease(subImageRef); // CGImageCreateWithImageInRect returns a +1 reference
return smallImg;
}
I think you can find a way from there to call it multiple times to crop your picture into 16 tiles.
Hope this helps.
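Building on that, a minimal sketch of the full 4x4 tiling loop for the 1024x1024 case (image is the UIImage decoded from your NSData; note that CGImageCreateWithImageInRect works in pixel coordinates):
// Sketch: split a 1024x1024 image into sixteen 256x256 tiles.
NSMutableArray *tiles = [NSMutableArray array];
CGFloat tileSize = 256.0;
for (int row = 0; row < 4; row++) {
    for (int col = 0; col < 4; col++) {
        CGRect rect = CGRectMake(col * tileSize, row * tileSize, tileSize, tileSize);
        CGImageRef tileRef = CGImageCreateWithImageInRect(image.CGImage, rect);
        [tiles addObject:[UIImage imageWithCGImage:tileRef]];
        CGImageRelease(tileRef);
    }
}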
originalImageView is an IBOutlet UIImageView. This image will be cropped.
#import <QuartzCore/QuartzCore.h>
This is needed for the border around each slice, which makes the result easier to see.
-(UIImage*)getCropImage:(CGRect)cropRect
{
CGImageRef image = CGImageCreateWithImageInRect([originalImageView.image CGImage],cropRect);
UIImage *croppedImage = [UIImage imageWithCGImage:image];
CGImageRelease(image);
return croppedImage;
}
-(void)prepareSlices:(uint)row:(uint)col
{
float flagX = originalImageView.image.size.width / originalImageView.frame.size.width;
float flagY = originalImageView.image.size.height / originalImageView.frame.size.height;
float _width = originalImageView.frame.size.width / col;
float _height = originalImageView.frame.size.height / row;
float _posX = 0.0;
float _posY = 0.0;
for (int i = 1; i <= row * col; i++) {
UIImageView *croppedImageView = [[UIImageView alloc] initWithFrame:CGRectMake(_posX, _posY, _width, _height)];
UIImage *img = [self getCropImage:CGRectMake(_posX * flagX, _posY * flagY, _width * flagX, _height * flagY)];
croppedImageView.image = img;
croppedImageView.layer.borderColor = [[UIColor whiteColor] CGColor];
croppedImageView.layer.borderWidth = 1.0f;
[self.view addSubview:croppedImageView];
[croppedImageView release];
_posX += _width;
if (i % col == 0) {
_posX = 0;
_posY += _height;
}
}
originalImageView.alpha = 0.0;
}
Call it like this:
[self prepareSlices:4 :4]; // a 4x4 grid gives the 16 tiles from the question
@interface UIImage (Sprite)
- (NSArray *)spritesWithSpriteSheetImage:(UIImage *)image spriteSize:(CGSize)size;
- (NSArray *)spritesWithSpriteSheetImage:(UIImage *)image inRange:(NSRange)range spriteSize:(CGSize)size;
@end
@implementation UIImage (Sprite)
-(NSArray *)spritesWithSpriteSheetImage:(UIImage *)image spriteSize:(CGSize)size {
return [self spritesWithSpriteSheetImage:self inRange:NSMakeRange(0, lroundf(MAXFLOAT))
spriteSize:size];
}
-(NSArray *)spritesWithSpriteSheetImage:(UIImage *)image
inRange:(NSRange)range
spriteSize:(CGSize)size {
if (!image || CGSizeEqualToSize(size, CGSizeZero) || range.length == 0)
return nil;
NSLog(#"%i %i", range.location, range.length);
CGImageRef spriteSheet = [image CGImage];
NSMutableArray *tempArray = [[[NSMutableArray alloc] init] autorelease];
int width = CGImageGetWidth(spriteSheet);
int height = CGImageGetHeight(spriteSheet);
int maxI = width / size.width;
int startI = 0;
int startJ = 0;
int length = 0;
int startPosition = range.location;
// Extracting initial I & J values from range info
//
if (startPosition != 0) {
for (int k=1; k<=maxI; k++) {
int d = k * maxI;
if (d/startPosition == 1) {
startI = maxI - (d % startPosition);
break;
}
else if (d/startPosition > 1) {
startI = startPosition;
break;
}
startJ++;
}
}
int positionX = startI * size.width;
int positionY = startJ * size.height;
BOOL isReady = NO;
while (positionY < height) {
while (positionX < width) {
CGImageRef sprite = CGImageCreateWithImageInRect(spriteSheet, CGRectMake(positionX, positionY, size.width, size.height));
[tempArray addObject:[UIImage imageWithCGImage:sprite]];
CGImageRelease(sprite);
length++;
if (length == range.length) {
isReady = YES;
break;
}
positionX += size.width;
}
if (isReady)
break;
positionX = 0;
positionY += size.height;
}
return [NSArray arrayWithArray:tempArray];
}
@end
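Usage would be along these lines (note the category ignores its image argument and operates on the receiver, which is how it is written above; the file name is only an example):
UIImage *sheet = [UIImage imageNamed:@"LargeImage.png"];
NSArray *tiles = [sheet spritesWithSpriteSheetImage:sheet spriteSize:CGSizeMake(256, 256)];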

Flood fill Crash

I have been trying to get a simple flood fill algorithm working for an iPhone app that I am developing, and I just can't get it working correctly.
I have got the actual process to work great; however, the app crashes when the fill is too large. From what I can tell, it's because the call stack is overflowing from all of the nested function calls. From what I have read, I need to implement an explicit stack, but I can't work out how that works.
typedef struct {
int red;
int green;
int blue;
} color;
@interface EMFloodTest : UIViewController {
UIImageView *mainImage;
unsigned char *imageData;
color selColor;
color newColor;
int maxByte;
}
@end
@implementation EMFloodTest
- (void)setupImageData {
CGImageRef imageRef = mainImage.image.CGImage;
if (imageRef == NULL) { return; }
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
maxByte = height * width * 4;
imageData = malloc(height * width * 4);
CGContextRef context = CGBitmapContextCreate(imageData, width, height, bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
}
- (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil
{
self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil];
if (self) {
mainImage = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"Color6.png"]];
[self.view addSubview:mainImage];
newColor.red = 255;
newColor.green = 94;
newColor.blue = 0;
[self setupImageData];
}
return self;
}
- (void)updateImage {
CGImageRef imageRef = mainImage.image.CGImage;
if (imageRef == NULL) { return; }
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(imageData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast );
imageRef = CGBitmapContextCreateImage (context);
mainImage.image = [UIImage imageWithCGImage:imageRef];
CGContextRelease(context);
}
- (void)setPixel:(NSUInteger)byte toColor:(color)color {
imageData[byte] = color.red;
imageData[byte+1] = color.green;
imageData[byte+2] = color.blue;
}
- (BOOL)testByte:(NSInteger)byte againstColor:(color)color {
if (imageData[byte] == color.red && imageData[byte+1] == color.green && imageData[byte+2] == color.blue) {
return YES;
} else {
return NO;
}
}
// This is where the flood fill starts. It's a basic implementation, but it crashes when filling large sections.
- (void)floodFillFrom:(NSInteger)byte bytesPerRow:(NSInteger)bpr {
int u = byte - bpr;
int r = byte + 4;
int d = byte + bpr;
int l = byte - 4;
if ([self testByte:u againstColor:selColor]) {
[self setPixel:u toColor:newColor];
[self floodFillFrom:u bytesPerRow:bpr];
}
if ([self testByte:r againstColor:selColor]) {
[self setPixel:r toColor:newColor];
[self floodFillFrom:r bytesPerRow:bpr];
}
if ([self testByte:d againstColor:selColor]) {
[self setPixel:d toColor:newColor];
[self floodFillFrom:d bytesPerRow:bpr];
}
if ([self testByte:l againstColor:selColor]) {
[self setPixel:l toColor:newColor];
[self floodFillFrom:l bytesPerRow:bpr];
}
}
- (void)startFillFrom:(NSInteger)byte bytesPerRow:(NSInteger)bpr {
if (imageData[byte] == 0 && imageData[byte+1] == 0 && imageData[byte+2] == 0) {
NSLog(#"Black Selected");
return;
} else if ([self testByte:byte againstColor:newColor]) {
NSLog(#"Same Fill Color");
} else {
// code goes here
NSLog(#"Color to be replaced");
[self floodFrom:byte bytesPerRow:bpr];
[self updateImage];
}
}
- (void)selectedColor:(CGPoint)point {
CGImageRef imageRef = mainImage.image.CGImage;
if (imageRef == NULL) { return; }
if (imageData == NULL) { return; }
NSInteger width = CGImageGetWidth(imageRef);
NSInteger byteNumber = 4*((width*round(point.y))+round(point.x));
NSInteger bytesPerPixel = 4;
NSInteger bytesPerRow = bytesPerPixel * width;
selColor.red = imageData[byteNumber];
selColor.green = imageData[byteNumber + 1];
selColor.blue = imageData[byteNumber + 2];
NSLog(#"Selected Color, RGB: %i, %i, %i",selColor.red, selColor.green, selColor.blue);
NSLog(#"Byte:%i",byteNumber);
[self startFillFrom:byteNumber bytesPerRow:bytesPerRow];
}
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
UITouch *touch = [touches anyObject];
CGPoint location = [touch locationInView:mainImage];
[self selectedColor:location];
}
Any help on how I might be able to implement a stack or even use another algorithm would be greatly appreciated.
Best,
Darren
The problem is the recursive implementation.
Recursive calls that go too deep overflow the stack.
You have to implement your algorithm in an iterative manner.
If you want to see an iterative example of flood fill, you can visit:
UIImageScanlineFloodfill
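For illustration, here is a minimal sketch of the same four-way fill driven by an explicit stack instead of recursion, reusing the question's testByte:/setPixel: helpers, its maxByte ivar, and its byte-offset convention (including its quirk that left/right neighbors can wrap across row boundaries):
// Sketch: iterative flood fill with an explicit stack of byte offsets.
- (void)iterativeFloodFillFrom:(NSInteger)startByte bytesPerRow:(NSInteger)bpr {
    NSMutableArray *stack = [NSMutableArray arrayWithObject:[NSNumber numberWithInteger:startByte]];
    while (stack.count > 0) {
        NSInteger byte = [[stack lastObject] integerValue];
        [stack removeLastObject];
        if (byte < 0 || byte >= maxByte) continue;              // stay inside the buffer
        if (![self testByte:byte againstColor:selColor]) continue;
        [self setPixel:byte toColor:newColor];
        [stack addObject:[NSNumber numberWithInteger:byte - bpr]]; // up
        [stack addObject:[NSNumber numberWithInteger:byte + 4]];   // right
        [stack addObject:[NSNumber numberWithInteger:byte + bpr]]; // down
        [stack addObject:[NSNumber numberWithInteger:byte - 4]];   // left
    }
}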