I am working on document edge detection using OpenCV in my iOS project and have successfully detected the edges of the document.
Now I want to rotate the image along with the detected rectangle. I referred to this
GitHub project to detect the edges.
To do that, I first rotated the image and then tried to re-detect the edges by again finding the largest rectangle in the rotated image. Unfortunately, it does not give me the exact rectangle.
Can somebody suggest a way to detect the rotated document's edges again, or should I rotate the detected rectangle along with the image?
Before Rotation Image
After Rotation Image
+(NSMutableArray *) getLargestSquarePoints: (UIImage *) image : (CGSize) size {
Mat imageMat;
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, cols, rows, 8, cvMat.step[0], colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
imageMat = cvMat;
cv::resize(imageMat, imageMat, cvSize(size.width, size.height));
// UIImageToMat(image, imageMat);
std::vector<std::vector<cv::Point> >rectangle;
std::vector<cv::Point> largestRectangle;
getRectangles(imageMat, rectangle);
getlargestRectangle(rectangle, largestRectangle);
if (largestRectangle.size() == 4)
{
// Thanks to: https://stackoverflow.com/questions/20395547/sorting-an-array-of-x-and-y-vertice-points-ios-objective-c/20399468#20399468
NSArray *points = @[
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[0].x, (CGFloat)largestRectangle[0].y}],
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[1].x, (CGFloat)largestRectangle[1].y}],
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[2].x, (CGFloat)largestRectangle[2].y}],
[NSValue valueWithCGPoint:(CGPoint){(CGFloat)largestRectangle[3].x, (CGFloat)largestRectangle[3].y}] ];
CGPoint min = [points[0] CGPointValue];
CGPoint max = min;
for (NSValue *value in points) {
CGPoint point = [value CGPointValue];
min.x = fminf(point.x, min.x);
min.y = fminf(point.y, min.y);
max.x = fmaxf(point.x, max.x);
max.y = fmaxf(point.y, max.y);
}
CGPoint center = {
0.5f * (min.x + max.x),
0.5f * (min.y + max.y),
};
NSLog(#"center: %#", NSStringFromCGPoint(center));
NSNumber *(^angleFromPoint)(id) = ^(NSValue *value){
CGPoint point = [value CGPointValue];
CGFloat theta = atan2f(point.y - center.y, point.x - center.x);
CGFloat angle = fmodf(M_PI - M_PI_4 + theta, 2 * M_PI);
return @(angle);
};
NSArray *sortedPoints = [points sortedArrayUsingComparator:^NSComparisonResult(id a, id b) {
return [angleFromPoint(a) compare:angleFromPoint(b)];
}];
NSLog(#"sorted points: %#", sortedPoints);
NSMutableArray *squarePoints = [[NSMutableArray alloc] init];
[squarePoints addObject: [sortedPoints objectAtIndex:0]];
[squarePoints addObject: [sortedPoints objectAtIndex:1]];
[squarePoints addObject: [sortedPoints objectAtIndex:2]];
[squarePoints addObject: [sortedPoints objectAtIndex:3]];
imageMat.release();
return squarePoints;
}
else{
imageMat.release();
return nil;
}
}
void getRectangles(cv::Mat& image, std::vector<std::vector<cv::Point>>&rectangles) {
// blur will enhance edge detection
cv::Mat blurred(image);
GaussianBlur(image, blurred, cvSize(11,11), 0);
cv::Mat gray0(blurred.size(), CV_8U), gray;
std::vector<std::vector<cv::Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3);
// Canny(gray0, gray, 0, 50, 5);
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
std::vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(cv::Mat(approx))) > 1000 &&
isContourConvex(cv::Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
rectangles.push_back(approx);
}
}
}
}
}
void getlargestRectangle(const std::vector<std::vector<cv::Point> >&rectangles, std::vector<cv::Point>& largestRectangle)
{
if (!rectangles.size())
{
return;
}
double maxArea = 0;
int index = 0;
for (size_t i = 0; i < rectangles.size(); i++)
{
cv::Rect rectangle = boundingRect(cv::Mat(rectangles[i]));
double area = rectangle.width * rectangle.height;
if (maxArea < area)
{
maxArea = area;
index = i;
}
}
largestRectangle = rectangles[index];
}
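// angle: returns the cosine of the angle between the vectors (pt1 - pt0) and (pt2 - pt0);
// values near 0 mean the corner at pt0 is close to a right angle.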
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
+(UIImage *) getTransformedImage: (CGFloat) newWidth : (CGFloat) newHeight : (UIImage *) origImage : (CGPoint [4]) corners : (CGSize) size {
cv::Mat imageMat;
CGColorSpaceRef colorSpace = CGImageGetColorSpace(origImage.CGImage);
CGFloat cols = size.width;
CGFloat rows = size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,
// Pointer to backing data
cols,
// Width of bitmap
rows,
// Height of bitmap
8,
// Bits per component
cvMat.step[0],
// Bytes per row
colorSpace,
// Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), origImage.CGImage);
CGContextRelease(contextRef);
imageMat = cvMat;
cv::Mat newImageMat = cv::Mat( cvSize(newWidth,newHeight), CV_8UC4);
cv::Point2f src[4], dst[4];
src[0].x = corners[0].x;
src[0].y = corners[0].y;
src[1].x = corners[1].x;
src[1].y = corners[1].y;
src[2].x = corners[2].x;
src[2].y = corners[2].y;
src[3].x = corners[3].x;
src[3].y = corners[3].y;
dst[0].x = 0;
dst[0].y = 0;
dst[1].x = newWidth - 1;
dst[1].y = 0;
dst[2].x = newWidth - 1;
dst[2].y = newHeight - 1;
dst[3].x = 0;
dst[3].y = newHeight - 1;
cv::warpPerspective(imageMat, newImageMat, cv::getPerspectiveTransform(src, dst), cvSize(newWidth, newHeight));
//Transform to UIImage
NSData *data = [NSData dataWithBytes:newImageMat.data length:newImageMat.elemSize() * newImageMat.total()];
CGColorSpaceRef colorSpace2;
if (newImageMat.elemSize() == 1) {
colorSpace2 = CGColorSpaceCreateDeviceGray();
} else {
// a 4-channel Mat needs an RGB color space, not a grayscale one
colorSpace2 = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGFloat width = newImageMat.cols;
CGFloat height = newImageMat.rows;
CGImageRef imageRef = CGImageCreate(width, height, 8, 8 * newImageMat.elemSize(),
newImageMat.step[0],
colorSpace2,
kCGImageAlphaNone | kCGBitmapByteOrderDefault, provider,
NULL, false, kCGRenderingIntentDefault);
UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace2);
return image;
}
If you use cv2.minAreaRect, it gives you the smallest enclosing rotated rectangle of a contour together with its angle, so you can rotate back.
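Since the rest of the code here uses the OpenCV C++ API rather than Python, here is a minimal sketch of that idea in C++; the imageMat and largestRectangle variables are assumed from the question's code, and the angle convention of cv::RotatedRect may need adjusting for your OpenCV version:

// Sketch: take the minimum-area rotated rectangle of the detected contour,
// then rotate the image by that rectangle's angle so it becomes axis-aligned.
cv::RotatedRect box = cv::minAreaRect(cv::Mat(largestRectangle));
cv::Mat rotation = cv::getRotationMatrix2D(box.center, box.angle, 1.0);
cv::Mat rotated;
cv::warpAffine(imageMat, rotated, rotation, imageMat.size(), cv::INTER_CUBIC);

Alternatively, you can apply the same rotation matrix to the four detected corner points with cv::transform instead of re-running the detection on the rotated image.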
I want to get all the points in my UIImageView so that I can change "some" points' colors without the need for UITouch. Is that possible?
What I am thinking about is:
get all points in the UIImageView
get the color of each point
if the previous color equals some specific color, change the color
I googled this a lot, but I found that all the tutorials depend on UITouch, like this one: http://www.markj.net/iphone-uiimage-pixel-color/
My main goal now is how to get all the points.
Any help is appreciated.
I found a solution; I hope it will help someone one day.
This method returns an array of the pixel colors of an image.
-(NSArray*)getRGBAsFromImage:(UIImage*)image atX:(int)x andY:(int)y count:(int)count
{
NSMutableArray *result = [NSMutableArray arrayWithCapacity:count];
// First get the image into your data buffer
CGImageRef imageRef = [image CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = (unsigned char*) calloc(height * width * 4, sizeof(unsigned char));
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
// Now your rawData contains the image data in the RGBA8888 pixel format.
NSUInteger byteIndex = (bytesPerRow * y) + x * bytesPerPixel;
for (int i = 0 ; i < count ; ++i)
{
CGFloat red = (rawData[byteIndex] * 1.0) / 255.0;
CGFloat green = (rawData[byteIndex + 1] * 1.0) / 255.0;
CGFloat blue = (rawData[byteIndex + 2] * 1.0) / 255.0;
CGFloat alpha = (rawData[byteIndex + 3] * 1.0) / 255.0;
byteIndex += bytesPerPixel;
UIColor *acolor = [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
[result addObject:acolor];
}
free(rawData);
return result;
}
you can call it like this:
NSArray *arrayOfPixels = [self getRGBAsFromImage:_patternFirstImage.image atX:_patternFirstImage.frame.origin.x andY:_patternFirstImage.frame.origin.y count:_patternFirstImage.frame.size.width * _patternFirstImage.frame.size.height];
NSLog(#"arrayOfPixels = %zd",[arrayOfPixels count]);
you can loop through all pixels to get its color like the following :
for(int i=0;i<[arrayOfPixels count];i++){
NSLog(#"objects = ---%#----",[arrayOfPixels objectAtIndex:i]);
if([self color:[arrayOfPixels objectAtIndex:i] isEqualToColor:UIColorFromRGB(0xE0E0E0) withTolerance:0.2]){
NSLog(#"index = %zd",i);
}
}
Thanks a lot to the answer here:
https://stackoverflow.com/a/1262893/2168496
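The color:isEqualToColor:withTolerance: helper used in that loop is not shown above; here is a minimal sketch along the lines of the linked answer (the method name and tolerance semantics are assumptions):

// Sketch: two colors are considered equal if every RGBA component
// differs by no more than the given tolerance.
- (BOOL)color:(UIColor *)color1 isEqualToColor:(UIColor *)color2 withTolerance:(CGFloat)tolerance {
    CGFloat r1, g1, b1, a1, r2, g2, b2, a2;
    [color1 getRed:&r1 green:&g1 blue:&b1 alpha:&a1];
    [color2 getRed:&r2 green:&g2 blue:&b2 alpha:&a2];
    return fabs(r1 - r2) <= tolerance &&
           fabs(g1 - g2) <= tolerance &&
           fabs(b1 - b2) <= tolerance &&
           fabs(a1 - a2) <= tolerance;
}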
Recently I was coding with SpriteKit.
What I need is to find a point on a bezier curve while an SKNode is moving along the UIBezierPath. By the way, the bezier curve has just one control point.
I use the bezier-curve equation to find 't', and use this 't' to get the point. I found that my code only fits the nearly straight parts of the bezier curve; where the curve bends strongly, it is wrong. I think the speed of the SKNode is not constant. I would be pleased to learn what is wrong, or to hear another solution.
Here is my code; it is for testing.
-(void)setupTestCurve
{
float ts =0.56f;
float timeofmove = 5.0f;
SKSpriteNode *arrow = [SKSpriteNode spriteNodeWithImageNamed:@"blue_b_01"];
arrow.scale = 0.8;
arrow.anchorPoint = CGPointMake(0.5, 0.5);
[background addChild:arrow];
CGPoint point1 = CGPointMake(400, 400);
CGPoint point2 = CGPointMake(800, 100);
CGPoint controlP = CGPointMake(100, 100);
[ToolObjective setupTestCurve:(SKSpriteNode *)background startp:point1 endp:point2 anchorPoint:controlP];
CGPoint ps = [ToolObjective get3dPointbezierInterpolation:ts startp:point1 controlp:controlP endp:point2];
float twill = [ToolObjective get3dTofbezierInterpolation:point1 nowPoint:ps endp:point2 anchorPoint:controlP];
SKAction *w = [SKAction waitForDuration:twill * timeofmove];
SKAction *paction = [SKAction runBlock:^{
[ToolObjective setupTestpoint:background startp:ps color:[UIColor redColor]];
}];
SKAction *m = [SKAction sequence:@[w, paction]];
CGPathRef patharrow = [ToolObjective Get3dBezierPath:point1 endPoint:point2 anchorPoint:controlP];
SKAction *moveBow = [SKAction followPath:patharrow asOffset:NO orientToPath:YES duration:timeofmove];
[arrow runAction:[SKAction group:@[m, moveBow]]];
}
Here are the tool functions.
/*
add a bezier curve to the background
*/
+(void)setupTestCurve:(SKSpriteNode *)background startp:(CGPoint)startPoint endp:(CGPoint)endpoint anchorPoint:(CGPoint)anchorPoint
{
UIBezierPath *path=[UIBezierPath bezierPath];
[path moveToPoint:startPoint];
[path addQuadCurveToPoint:endpoint controlPoint:anchorPoint];
SKShapeNode *circle = [SKShapeNode node];
circle.path = path.CGPath;
[background addChild:circle];
}
/*
get point on a bezier curve with 't'
*/
+(CGPoint) get3dPointbezierInterpolation:(CGFloat)t startp:(CGPoint)startp controlp:(CGPoint)controlp endp:(CGPoint)endp
{
CGFloat q0x = (1 - t) * startp.x + t * controlp.x;
CGFloat q1x = (1 - t) * controlp.x + t * endp.x;
CGFloat x = (1 - t) * q0x + t * q1x;
CGFloat q0y = (1 - t) * startp.y + t * controlp.y;
CGFloat q1y = (1 - t) * controlp.y + t * endp.y;
CGFloat y = (1 - t) * q0y + t * q1y;
return CGPointMake(x, y);
}
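For reference, this is de Casteljau's evaluation of the quadratic bezier B(t) = (1-t)^2 * P0 + 2(1-t)t * C + t^2 * P1, where P0 is the start point, C the control point, and P1 the end point.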
/*
get 't' of a bezier curve with a point on this bezier curve
*/
+(CGFloat) get3dTofbezierInterpolation:(CGPoint)startp nowPoint:(CGPoint)nowp endp:(CGPoint)endp anchorPoint:(CGPoint)controlp
{
CGFloat ax = (startp.x - 2 * controlp.x + endp.x);
CGFloat bx = (2 * (controlp.x - startp.x));
CGFloat cx = (startp.x - nowp.x);
CGFloat tx1 = -1, tx2 = -1; // invalid t by default, in case the discriminant is negative
CGFloat px = bx * bx - 4 * ax * cx;
if (px >= 0) {
tx1 = (-bx + sqrt(px)) / (2 * ax);
tx2 = (-bx - sqrt(px)) / (2 * ax);
}
if (tx1 == tx2) {
return tx1;
}
CGFloat ay = (startp.y - 2 * controlp.y + endp.y);
CGFloat by = (2 * (controlp.y - startp.y));
CGFloat cy = (startp.y - nowp.y);
CGFloat ty1 = -1, ty2 = -1; // invalid t by default, in case the discriminant is negative
CGFloat py = by * by - 4 * ay * cy;
if (py >= 0) {
ty1 = (-by + sqrt(py)) / (2 * ay);
ty2 = (-by - sqrt(py)) / (2 * ay);
}
if (ty1 == ty2) {
return ty1;
}
if (fabsf(tx1 - ty1) < 0.0001)
return tx1;
else if (fabsf(tx1 - ty2) < 0.0001) {
return tx1;
}
else if (fabsf(tx2 - ty1) < 0.0001) {
return tx2;
}
else if (fabsf(tx2 - ty2) < 0.0001) {
return tx2;
}
return 0;
}
/*
get bezier curve path with just one anchorPoint
*/
+(CGPathRef)Get3dBezierPath:(CGPoint)startPoint endPoint:(CGPoint)endPoint anchorPoint:(CGPoint)anchorPoint
{
UIBezierPath *path=[UIBezierPath bezierPath];
[path moveToPoint:startPoint];
[path addQuadCurveToPoint:endPoint controlPoint:anchorPoint];
return path.CGPath;
}
/*
add a point on bezier curve
*/
+(void)setupTestpoint:(SKSpriteNode *)background startp:(CGPoint)point color:(UIColor *)color
{
SKShapeNode *cpoint = [SKShapeNode node];
UIBezierPath *ppath = [UIBezierPath bezierPathWithRect:CGRectMake(point.x,point.y,10,10)];
cpoint.fillColor = color;
cpoint.path = ppath.CGPath;
[background addChild:cpoint];
}
You can use this code in a project to test it.
Thank you very much!
A day later I found that an SKNode does not move at a constant speed while following the bezier path. You can see this with the following method, which plots points at evenly spaced values of t.
for (float i = 0; i < 1.0001f; i+=0.10f) {
CGPoint ps = [ToolObjective get3dPointbezierInterpolation: i startp:point1 controlp:controlP endp:point2];
[ToolObjective setupTestpoint:background startp:ps color:[UIColor yellowColor]];
}
So the key problem is to find a way to make the SKNode move at a constant speed.
I will post my solution here, or if you already know one, please tell me!
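One common approach, sketched here rather than given as a definitive implementation: approximate the curve's arc length with a cumulative table of sampled segment lengths, then remap a constant-speed parameter s in [0, 1] to the curve parameter t. The method name is made up for illustration:

// Sketch: arc-length reparameterization of the quadratic bezier.
// Sample the curve, accumulate segment lengths, then find the t whose
// arc length matches s * totalLength for a constant-speed s in [0, 1].
+(CGFloat)tForConstantSpeed:(CGFloat)s startp:(CGPoint)p0 controlp:(CGPoint)c endp:(CGPoint)p1
{
    const int samples = 100;
    CGFloat lengths[samples + 1];
    lengths[0] = 0;
    CGPoint prev = p0;
    for (int i = 1; i <= samples; i++) {
        CGPoint pt = [ToolObjective get3dPointbezierInterpolation:(CGFloat)i / samples startp:p0 controlp:c endp:p1];
        lengths[i] = lengths[i - 1] + hypot(pt.x - prev.x, pt.y - prev.y);
        prev = pt;
    }
    CGFloat target = s * lengths[samples];
    for (int i = 1; i <= samples; i++) {
        if (lengths[i] >= target) {
            // interpolate between the two neighboring samples
            CGFloat segment = lengths[i] - lengths[i - 1];
            CGFloat frac = segment > 0 ? (target - lengths[i - 1]) / segment : 0;
            return ((i - 1) + frac) / samples;
        }
    }
    return 1.0;
}

The remapped t can then be fed into get3dPointbezierInterpolation from a custom action instead of followPath:, so the node advances by equal arc lengths per unit time.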
I implemented a method that returns an NSBitmapImageRep. Onto that bitmap, 10x2 rectangles should be drawn, and each rectangle should be filled with cyan, but for each rectangle the cyan value should be increased by 12 (the value starts at 0).
The resulting bitmap gets 20 rectangles, as expected, but the color doesn't differ between the rectangles: all rectangles have the same cyan value.
I have no idea what the problem is. Can somebody please give me a hint?
-(NSBitmapImageRep*)drawOntoBitmap
{
NSRect offscreenRect = NSMakeRect(0.0, 0.0, 1000.0, 400.0);
NSBitmapImageRep *image = nil;
image = [[NSBitmapImageRep alloc] initWithBitmapDataPlanes:nil
pixelsWide:offscreenRect.size.width
pixelsHigh:offscreenRect.size.height
bitsPerSample:8
samplesPerPixel:4
hasAlpha:NO
isPlanar:NO
colorSpaceName:NSDeviceCMYKColorSpace
bitmapFormat:0
bytesPerRow:(4 * offscreenRect.size.width)
bitsPerPixel:32];
[NSGraphicsContext saveGraphicsState];
[NSGraphicsContext setCurrentContext:[NSGraphicsContext graphicsContextWithBitmapImageRep:image]];
NSRect colorRect;
NSBezierPath *thePath;
int cyan = 0;
int x = 0;
int y = 0;
int w = 0;
int h = 0;
for (intj = 0; j<2; j++)
{
y = j * 200;
h = 200;
for (int i = 0; i<10; i++)
{
x = i * 100;
w = 100;
colorRect = NSMakeRect(x, y, w, h);
thePath = [NSBezierPath bezierPathWithRect: colorRect];
cyan += 12;
[[NSColor colorWithDeviceCyan:cyan magenta:0 yellow:0 black:0 alpha:100] set];
[thePath fill];
}
}
[NSGraphicsContext restoreGraphicsState];
return image;
}
For each rect the same color value is used, and it's the last cyan value that was set after both loops have finished.
OK, I found out that NSColor component values have a range of 0.0 - 1.0.
So I have to declare cyan as a float and increment it like this:
cyan += 12.0 / 255.0;
(Note that the integer expression 12/255 would evaluate to 0.) The value has to stay no larger than 1.0.
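Putting it together, a minimal sketch of the corrected loop, assuming the same 10x2 grid of 100x200 rectangles:

// cyan is a CGFloat in [0, 1], incremented by 12/255 per rectangle.
CGFloat cyan = 0.0;
for (int j = 0; j < 2; j++) {
    for (int i = 0; i < 10; i++) {
        NSRect colorRect = NSMakeRect(i * 100, j * 200, 100, 200);
        cyan += 12.0 / 255.0;
        [[NSColor colorWithDeviceCyan:cyan magenta:0 yellow:0 black:0 alpha:1.0] set];
        [[NSBezierPath bezierPathWithRect:colorRect] fill];
    }
}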
I am trying to create a custom MKOverlayView subclass that draws the overlay itself, since the normal MKOverlay and MKOverlayView do not suit the needs of the application I am building.
I am having a problem with the drawing of the polygon overlay. When I draw it, it looks nowhere near as good as when I let MKOverlayView draw it; by that I mean the edges are not sharp and look pixelated when you zoom in on the map. Also, for some reason the lines from point to point don't get drawn.
In addition, when zooming in, some of the polygon drawing gets clipped out until I zoom back out again.
Here is my draw code:
- (void)drawMapRect:(MKMapRect)mapRect zoomScale:(MKZoomScale)zoomScale inContext:(CGContextRef)context{
MapOverlay *newOverlay = (MapOverlay *)self.overlay;
CGColorRef ref2 = [newOverlay.strokeColor CGColor];
CGContextSetLineWidth(context, newOverlay.lineWidth);
CGColorRef ref = [newOverlay.fillColor CGColor];
int _countComponents = CGColorGetNumberOfComponents(ref);
if (_countComponents == 4) {
const CGFloat *_components2 = CGColorGetComponents(ref);
CGFloat red = _components2[0];
CGFloat green = _components2[1];
CGFloat blue = _components2[2];
CGFloat alpha = _components2[3];
CGContextSetRGBFillColor(context, red, green, blue, alpha);
}
for (int i=0; i<newOverlay.coordinates.count; i++){
if(i % 2 == 0) {
CLLocationCoordinate2D p;
p.latitude = (CLLocationDegrees)[(NSString *)[newOverlay.coordinates objectAtIndex:i+1] floatValue];
p.longitude = (CLLocationDegrees)[(NSString *)[newOverlay.coordinates objectAtIndex:i] floatValue];
//CLLocation *p = [[CLLocation alloc] initWithLatitude:(CLLocationDegrees)[(NSString *)[newOverlay.coordinates objectAtIndex:i+1] floatValue] longitude:(CLLocationDegrees)[(NSString *)[newOverlay.coordinates objectAtIndex:i] floatValue]];
MKMapPoint point = MKMapPointForCoordinate(p);
CGPoint point2 = [self pointForMapPoint:point];
if (i==0){
CGContextMoveToPoint(context, point2.x, point2.y);
}else{
CGContextAddLineToPoint(context, point2.x, point2.y);
}
}
}
CGContextDrawPath(context, kCGPathFillStroke);
//CGContextStrokePath(context);
}
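As an aside on the pixelated edges (an assumption, not a confirmed fix): the width passed to CGContextSetLineWidth here is in map points, which cover fewer screen pixels the further you zoom in. MapKit provides MKRoadWidthAtZoomScale to get a width that stays visually constant:

// Sketch: scale the stroke width with the current zoom so edges stay crisp.
CGContextSetLineWidth(context, newOverlay.lineWidth * MKRoadWidthAtZoomScale(zoomScale));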
I have found very little information on custom MKOverlayView classes and how to draw on the map, but these were the two tutorials I was using:
http://spitzkoff.com/craig/?p=65
http://i.ndigo.com.br/2012/05/ios-maps-with-image-overlays/
UPDATE
I have a feeling it might have something to do with the bounding box of the overlay, because if I return the bounding box of the whole world it displays fine, but it's obviously not efficient to have every possible overlay drawn.
Here is how I compute the bounding box of my overlay in my custom MKOverlay class:
- (MKMapRect)boundingMapRect{
double maxY = 0;
double minY = 0;
double maxX = 0;
double minX = 0;
for(NSUInteger i = 0;i < coordinates.count; i++) {
if(i % 2 == 0) {
CLLocationCoordinate2D tempLoc;
tempLoc.latitude =(CLLocationDegrees)[(NSString *)[coordinates objectAtIndex:(NSUInteger)i + 1] floatValue];
tempLoc.longitude = (CLLocationDegrees)[(NSString *)[coordinates objectAtIndex:(NSUInteger)i] floatValue];
MKMapPoint tempPoint = MKMapPointForCoordinate(tempLoc);
if(i == 0){
minX = tempPoint.x;
minY = tempPoint.y;
if(tempPoint.x > maxX){
maxX = tempPoint.x;
}
if (tempPoint.y > maxY){
maxY = tempPoint.y;
}
}else{
if(tempPoint.x > maxX){
maxX = tempPoint.x;
}
if(tempPoint.x < minX){
minX = tempPoint.x;
}
if (tempPoint.y > maxY){
maxY = tempPoint.y;
}
if(tempPoint.y < minY){
minY = tempPoint.y;
}
}
}
}
MKMapRect b2 = MKMapRectMake(minX, maxY, minX-maxX, minY-maxY);
return b2;
//return MKMapRectWorld;
}
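For comparison, here is a sketch of the same bounding box built with MKMapRectUnion. Note that MKMapRectMake expects an origin plus a positive width and height, while the code above passes minX-maxX and minY-maxY, which come out negative. The coordinates array is assumed to hold the same alternating longitude/latitude strings:

// Sketch: union a zero-size rect for every point to get the bounding rect.
- (MKMapRect)boundingMapRect {
    MKMapRect bounds = MKMapRectNull;
    for (NSUInteger i = 0; i + 1 < coordinates.count; i += 2) {
        CLLocationCoordinate2D loc;
        loc.longitude = (CLLocationDegrees)[(NSString *)[coordinates objectAtIndex:i] floatValue];
        loc.latitude = (CLLocationDegrees)[(NSString *)[coordinates objectAtIndex:i + 1] floatValue];
        MKMapPoint p = MKMapPointForCoordinate(loc);
        bounds = MKMapRectUnion(bounds, MKMapRectMake(p.x, p.y, 0, 0));
    }
    return bounds;
}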