The code I used to draw on images is given below; I am using a pan gesture to find where the user touches. With this code, the lines the user draws come out as disconnected points when I move my hand over the image very fast.
// Draws one pan segment plus a dot marker into thatsMyImage.
// Fixed: the original stroked the ellipse BEFORE stroking the line; the
// ellipse/rect convenience paint calls clear the current path as a side
// effect, so the line was never stroked and only dots appeared (the exact
// symptom reported). Stroke the segment first, with round caps.
UIGraphicsBeginImageContextWithOptions((self.thatsMyImage.frame.size), NO, 0.0);
[self.selfieImage.image drawInRect:CGRectMake(0, 0, self.thatsMyImage.frame.size.width, self.thatsMyImage.frame.size.height)];
CGContextRef context = UIGraphicsGetCurrentContext();
[[UIColor whiteColor] set];
CGContextSetLineWidth(context, 10.0f);
CGContextSetLineCap(context, kCGLineCapRound); // round caps hide the joins between segments
CGContextMoveToPoint(context, from.x, from.y);
CGContextAddLineToPoint(context, to.x, to.y);
CGContextStrokePath(context);
// Deprecated CGContextSetFillColor + CGColorGetComponents (which assumes the
// context and color share a color space) replaced with the CGColor variant.
CGContextSetFillColorWithColor(context, [UIColor blueColor].CGColor);
CGContextFillEllipseInRect(context, CGRectMake(to.x, to.y, 10, 10));
self.thatsMyImage.image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
This is the method which is being called when panGesture is detected.
// Pan handler: draws a segment from the previous sample point to the
// current one.
// Fixed: lastPoint is now seeded on StateBegan — previously the first
// segment of every stroke started from the END of the previous stroke.
// Also removed the redundant second locationInView: call.
-(void)freeFormDrawing:(UIPanGestureRecognizer *)gesture
{
    CGPoint p = [gesture locationInView:self.selfieImage];
    if (gesture.state == UIGestureRecognizerStateBegan)
    {
        lastPoint = p;
    }
    else if (gesture.state == UIGestureRecognizerStateChanged)
    {
        [self drawLineFrom:lastPoint endPoint:p];
        lastPoint = p;
    }
}
Can anybody please tell me how I can do free-form (doodle) drawing on images with smooth lines and curves? Thanks in advance and happy coding!
I implemented the same thing. What I did was add a transparent UIImageView above the UIImageView holding the UIImage I wanted to draw on.
// Transparent overlay view that receives the pan and accumulates the
// drawing; the photo underneath stays untouched until composited.
UIImageView *drawableView = [[UIImageView alloc] initWithFrame:self.drawImageView.bounds];
drawableView.userInteractionEnabled = YES; // UIImageView defaults to NO
// Fixed: @selector, not #selector (markdown-mangled @).
UIPanGestureRecognizer *panGesture = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(drawingViewDidPan:)];
[drawableView addGestureRecognizer:panGesture];
Then when the user panned on the UIImageView I would call this function
// Pan callback: connects the previous sample point to the current one.
- (void)drawingViewDidPan:(UIPanGestureRecognizer*)sender
{
    CGPoint dragPoint = [sender locationInView:drawableView];

    // A new stroke starts from wherever the finger first landed.
    if (sender.state == UIGestureRecognizerStateBegan) {
        prevDraggingPosition = dragPoint;
    }

    // Draw for every state except Ended (Began/Changed/Cancelled/Failed).
    if (sender.state != UIGestureRecognizerStateEnded) {
        [self drawLine:prevDraggingPosition to:dragPoint];
    }

    prevDraggingPosition = dragPoint;
}
Both prevDraggingPosition and currentDraggingPosition are CGPoint.
Then I used the following function to draw line from the prevDraggingPosition to currentDraggingPosition
// Renders one line segment into drawableView.image by redrawing the
// accumulated image and appending the new segment.
// Fixed: @autoreleasepool, not #autoreleasepool (markdown-mangled @).
-(void)drawLine:(CGPoint)from to:(CGPoint)to
{
    @autoreleasepool {
        CGSize size = drawableView.frame.size;
        // Scale 0.0 = screen scale, so the drawing stays sharp on Retina.
        UIGraphicsBeginImageContextWithOptions(size, NO, 0.0);
        CGContextRef context = UIGraphicsGetCurrentContext();
        // Repaint everything drawn so far, then add the new segment.
        [drawableView.image drawAtPoint:CGPointZero];
        CGFloat strokeWidth = 4.0;
        strokeColor = colorChangeView.backgroundColor;
        CGContextSetLineWidth(context, strokeWidth);
        CGContextSetStrokeColorWithColor(context, strokeColor.CGColor);
        CGContextSetLineCap(context, kCGLineCapRound); // round caps hide segment joins
        CGContextMoveToPoint(context, from.x, from.y);
        CGContextAddLineToPoint(context, to.x, to.y);
        CGContextStrokePath(context);
        drawableView.image = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
    }
}
Then finally to get the image with the drawing on it you can build the image by drawing the drawableView image onto your UIImageView that has your image like this.
// Composites the untouched photo with the drawing overlay at the photo's
// native size and returns the flattened result.
// Fixed: @autoreleasepool, not #autoreleasepool (markdown-mangled @).
- (UIImage*)buildImage
{
    @autoreleasepool {
        UIGraphicsBeginImageContextWithOptions(originalImageSize, NO, self.drawImageView.image.scale);
        [self.drawImageView.image drawAtPoint:CGPointZero];
        // Stretch the overlay from view coordinates up to image coordinates.
        [drawableView.image drawInRect:CGRectMake(0, 0, originalImageSize.width, originalImageSize.height)];
        UIImage *tmp = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        return tmp;
    }
}
Where originalImageSize is the size of your image.
Hope this helps!
Related
I need to build a crop image and let the user pinch in\out and rotate the image.
I have the parent UIView that hold the UIImageView.
parent UIView is clipsToBounds = YES
the inner UIImageView is with contentMode = UIViewContentModeScaleAspectFill;
I don't understand how to calculate the crop rect.
this the parent view before pick image :
this is after the UIImageView add to parentView with frame (0,0,parent.width ,parent.height) and with UIViewContentModeScaleAspectFill.
this is after pinch in :
this is my crop code.
// Computes the crop rect for the visible portion of the image view inside
// its clipping parent, then delegates to -cropWithImageView:withCropRect:.
// Fixed: internal parameter typo "paretView" (selector is unchanged, so
// callers are unaffected).
// NOTE(review): the width/height terms (fabs of view-size minus image-pixel-
// size) mix view coordinates with image pixel sizes — verify against the
// zoomed/rotated cases; this may be why the crop comes out wrong.
-(UIImage*)createImageToPost:(UIImageView*)imageView withParentView:(UIView*)parentView
{
    CGRect rectToCrop = CGRectMake(parentView.frame.origin.x - imageView.frame.origin.x,
                                   parentView.frame.origin.y - imageView.frame.origin.y,
                                   fabs(parentView.frame.size.width - imageView.image.size.width),
                                   fabs(parentView.frame.size.height - imageView.image.size.height));
    UIImage *newCropImage = [self cropWithImageView:imageView withCropRect:rectToCrop];
    return newCropImage;
}
// Renders the image view's image through the view's current transform
// (pinch/rotate), then crops the result to cropRect in a second pass.
// NOTE(review): works in image-pixel coordinates throughout; if the view's
// on-screen size differs from the image's pixel size (AspectFill), cropRect
// must already be expressed in image coordinates — confirm at the call site.
-(UIImage*)cropWithImageView:(UIImageView*)imageView withCropRect:(CGRect)cropRect
{
CGRect rect = CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height);
// Pass 1: draw the image with the view's transform applied.
UIGraphicsBeginImageContext(imageView.image.size);
CGContextRef ctx = UIGraphicsGetCurrentContext();
// Clear whole thing
CGContextClearRect(ctx, rect);
// Apply the view's transform about the image center (the image view was
// transformed about its own center)
CGContextTranslateCTM(ctx, rect.size.width*0.5, rect.size.height*0.5);
CGContextConcatCTM(ctx, imageView.transform);
CGContextTranslateCTM(ctx, -rect.size.width*0.5, -rect.size.height*0.5);
// Translate and flip to compensate for Quartz's inverted coordinate system
CGContextTranslateCTM(ctx, 0.0, rect.size.height);
CGContextScaleCTM(ctx, 1.0, -1.0);
// Draw the image into the transformed context
CGContextDrawImage(ctx, rect, imageView.image.CGImage);
// Snapshot the transformed image
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
// End the drawing
UIGraphicsEndImageContext();
// Pass 2: crop the transformed image, centered on cropRect.
UIGraphicsBeginImageContext(cropRect.size);
ctx = UIGraphicsGetCurrentContext();
// Clear whole thing
CGContextClearRect(ctx, CGRectMake(0, 0, cropRect.size.width, cropRect.size.height));
// Shift so the center of the transformed image lands in the crop context
CGContextTranslateCTM(ctx, -((newImage.size.width*0.5)-(cropRect.size.width*0.5)),
(newImage.size.height*0.5)-(cropRect.size.height*0.5));
// Translate and flip to compensate for Quartz's inverted coordinate system
CGContextTranslateCTM(ctx, 0.0, cropRect.size.height);
CGContextScaleCTM(ctx, 1.0, -1.0);
// Draw the transformed image at full size; the context clips it to cropRect
CGContextDrawImage(ctx, CGRectMake(0,0,newImage.size.width,newImage.size.height), newImage.CGImage);
// Create the new UIImage from the context
newImage = UIGraphicsGetImageFromCurrentImageContext();
// End the drawing
UIGraphicsEndImageContext();
return newImage;
}
// Gesture setup for the croppable image view.
// Fixed: @selector, not #selector (markdown-mangled @) on all three.
// create and configure the pinch gesture
UIPinchGestureRecognizer *pinchGestureRecognizer = [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(pinchGestureDetected:)];
[pinchGestureRecognizer setDelegate:self];
[imageView addGestureRecognizer:pinchGestureRecognizer];

// create and configure the rotation gesture
UIRotationGestureRecognizer *rotationGestureRecognizer = [[UIRotationGestureRecognizer alloc] initWithTarget:self action:@selector(rotationGestureDetected:)];
[rotationGestureRecognizer setDelegate:self];
[imageView addGestureRecognizer:rotationGestureRecognizer];

// create and configure the pan gesture
UIPanGestureRecognizer *panGestureRecognizer = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(panGestureDetected:)];
[panGestureRecognizer setDelegate:self];
[imageView addGestureRecognizer:panGestureRecognizer];
// Pinch: apply the incremental scale, then reset the recognizer so the
// next callback's scale is relative to the current transform.
- (void)pinchGestureDetected:(UIPinchGestureRecognizer *)recognizer
{
    UIGestureRecognizerState state = recognizer.state;
    if (state != UIGestureRecognizerStateBegan && state != UIGestureRecognizerStateChanged) {
        return;
    }
    CGFloat scale = recognizer.scale;
    recognizer.view.transform = CGAffineTransformScale(recognizer.view.transform, scale, scale);
    recognizer.scale = 1.0;
}
// Rotation: apply the incremental rotation, then zero the recognizer so
// the next callback's rotation is relative to the current transform.
- (void)rotationGestureDetected:(UIRotationGestureRecognizer *)recognizer
{
    UIGestureRecognizerState state = recognizer.state;
    if (state != UIGestureRecognizerStateBegan && state != UIGestureRecognizerStateChanged) {
        return;
    }
    CGFloat rotation = recognizer.rotation;
    recognizer.view.transform = CGAffineTransformRotate(recognizer.view.transform, rotation);
    recognizer.rotation = 0;
}
// Pan: apply the incremental translation, then zero the recognizer so the
// next callback's translation is relative to the current transform.
- (void)panGestureDetected:(UIPanGestureRecognizer *)recognizer
{
    UIGestureRecognizerState state = recognizer.state;
    if (state != UIGestureRecognizerStateBegan && state != UIGestureRecognizerStateChanged) {
        return;
    }
    CGPoint translation = [recognizer translationInView:recognizer.view];
    recognizer.view.transform = CGAffineTransformTranslate(recognizer.view.transform, translation.x, translation.y);
    [recognizer setTranslation:CGPointZero inView:recognizer.view];
}
this is the image i get after crop:
My top and bottom toolbars are showing as black when in the app they show as light gray. I'm assuming it may have something to do with the default 'translucency checkbox' I found in the .storyboard, attributes inspector of the toolbar?
I'm using a UIActivityViewController to shoot this image out in an email. Here's the code for grabbing the screenshot:
// Grab a screenshot
UIGraphicsBeginImageContext(self.view.bounds.size);
[self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *imageToShare = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
Solution found here: iOS: what's the fastest, most performant way to make a screenshot programmatically?
Seems like this should be a factory method provided by an Apple framework, though. I'm putting this in a Utils.h/.m class:
// Captures the entire screen (all windows), honoring interface orientation.
// Fixed: @selector, not #selector, in the respondsToSelector: check.
+ (UIImage *)screenshot
{
    CGSize imageSize = CGSizeZero;
    UIInterfaceOrientation orientation = [UIApplication sharedApplication].statusBarOrientation;
    // [UIScreen bounds] here is portrait-oriented; swap for landscape.
    if (UIInterfaceOrientationIsPortrait(orientation)) {
        imageSize = [UIScreen mainScreen].bounds.size;
    } else {
        imageSize = CGSizeMake([UIScreen mainScreen].bounds.size.height, [UIScreen mainScreen].bounds.size.width);
    }

    UIGraphicsBeginImageContextWithOptions(imageSize, NO, 0);
    CGContextRef context = UIGraphicsGetCurrentContext();
    for (UIWindow *window in [[UIApplication sharedApplication] windows]) {
        CGContextSaveGState(context);
        // Position the context at the window's center and apply the window's
        // own transform relative to its anchor point.
        CGContextTranslateCTM(context, window.center.x, window.center.y);
        CGContextConcatCTM(context, window.transform);
        CGContextTranslateCTM(context, -window.bounds.size.width * window.layer.anchorPoint.x, -window.bounds.size.height * window.layer.anchorPoint.y);
        // Rotate the context to match the current interface orientation.
        if (orientation == UIInterfaceOrientationLandscapeLeft) {
            CGContextRotateCTM(context, M_PI_2);
            CGContextTranslateCTM(context, 0, -imageSize.width);
        } else if (orientation == UIInterfaceOrientationLandscapeRight) {
            CGContextRotateCTM(context, -M_PI_2);
            CGContextTranslateCTM(context, -imageSize.height, 0);
        } else if (orientation == UIInterfaceOrientationPortraitUpsideDown) {
            CGContextRotateCTM(context, M_PI);
            CGContextTranslateCTM(context, -imageSize.width, -imageSize.height);
        }
        // Prefer the snapshot API where available; fall back to layer rendering.
        if ([window respondsToSelector:@selector(drawViewHierarchyInRect:afterScreenUpdates:)]) {
            [window drawViewHierarchyInRect:window.bounds afterScreenUpdates:YES];
        } else {
            [window.layer renderInContext:context];
        }
        CGContextRestoreGState(context);
    }

    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return image;
}
You should probably set the color you want on your view before rendering its layer:
// Give the view an opaque background before snapshotting; per the report
// above, translucent bars otherwise render over black.
self.view.backgroundColor = [UIColor whateverColorYouWant];
// Fixed: ...WithOptions with scale 0 renders at the screen scale; the plain
// UIGraphicsBeginImageContext variant always produces a 1x (non-Retina) image.
UIGraphicsBeginImageContextWithOptions(self.view.bounds.size, NO, 0);
[self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *imageToShare = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
Take a look at that answer.
Trying to get an image with rounded corners and stroke,
but there is something I do wrong, because app hangs during this:
// Returns a copy of this image clipped to rounded corners, with a stroked
// oval drawn first.
// NOTE(review): CGRectInset by 90% of the width/height insets from EACH
// side, so the resulting rect has negative size (w - 1.8w < 0) — likely the
// cause of the reported hang; the inset factors probably want to be a small
// fraction such as 0.1f. Confirm the intended oval size.
// NOTE(review): the oval is stroked BEFORE the clip + image draw, so the
// image is painted over it; stroke after drawing if the ring should remain
// visible.
- (UIImage *)roundedCornerImage:(NSInteger)radius{
UIGraphicsBeginImageContextWithOptions(self.size, NO, 0);
CGRect box = CGRectInset((CGRect){CGPointZero, self.size}, self.size.width * 0.9f, self.size.height * 0.9f);
UIBezierPath *ballBezierPath = [UIBezierPath bezierPathWithOvalInRect:box];
[[UIColor blackColor] setStroke];
[ballBezierPath setLineWidth:4.0];
[ballBezierPath stroke];
// Clip subsequent drawing to the rounded-rect outline, then draw the image.
[[UIBezierPath bezierPathWithRoundedRect:(CGRect){CGPointZero, self.size}
cornerRadius:radius]addClip];
[self drawInRect:(CGRect){CGPointZero, self.size}];
UIImage* result = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return result;
}
Forget messing with the layer, it's a performance killer in scroll views. Just generate a new image instead:
// Rounds an image's corners offscreen so scrolling content never pays for
// a live cornerRadius/masksToBounds on its layer.
+(UIImage *)makeRoundedImage:(UIImage *)image withRadius:(CGFloat)radius
{
    CGRect itemFrame = CGRectMake(0, 0, radius * 2, radius * 2);

    // Throwaway image view configured to clip the source to rounded corners.
    UIImageView *roundedView = [UIImageView new];
    roundedView.frame = itemFrame;
    roundedView.contentMode = UIViewContentModeScaleToFill;
    [roundedView setImage:image];
    roundedView.layer.cornerRadius = radius;
    roundedView.layer.masksToBounds = YES;

    // Snapshot the clipped view into a bitmap at screen scale.
    UIGraphicsBeginImageContextWithOptions(itemFrame.size, NO, 0.0);
    [roundedView.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    // Fini
    return returnImage;
}
Just use
// Dot-syntax equivalents of the setter messages.
imageView.layer.cornerRadius = 5.0f;
imageView.layer.borderWidth = 5.0f;
Don't forget to #import QuartzCore.
I'm using BCTabBarController in my app, and I'm trying to customize it so that it uses Core Graphics to highlight the images automatically, so that I don't need four copies of each image. (Retina, Retina-selected, Legacy, Legacy-selected)
User Ephraim has posted a great starting point for this, but it returns legacy sized images. I've played with some of the settings, but I'm not very familiar with Core Graphics, so I'm shooting in the dark.
Ephraim's Code:
// Tints this image with bgColor (clipped to the image's alpha mask),
// overlays a 4-stop white gradient for the shaded effect, and applies a
// drop shadow. Used to generate normal/selected tab icons from one asset.
// Fixed for Retina: UIGraphicsBeginImageContextWithOptions with scale 0.0
// (the main screen's scale) replaces UIGraphicsBeginImageContext, which
// always renders at 1x and therefore returned legacy-sized images.
- (UIImage *) imageWithBackgroundColor:(UIColor *)bgColor
                           shadeAlpha1:(CGFloat)alpha1
                           shadeAlpha2:(CGFloat)alpha2
                           shadeAlpha3:(CGFloat)alpha3
                           shadowColor:(UIColor *)shadowColor
                          shadowOffset:(CGSize)shadowOffset
                            shadowBlur:(CGFloat)shadowBlur {
    UIImage *image = self;
    CGColorRef cgColor = [bgColor CGColor];
    CGColorRef cgShadowColor = [shadowColor CGColor];

    // RGBA gradient stops: white at the three caller-supplied alphas.
    CGFloat components[16] = {1,1,1,alpha1, 1,1,1,alpha1, 1,1,1,alpha2, 1,1,1,alpha3};
    CGFloat locations[4] = {0, 0.5, 0.6, 1};
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGGradientRef colorGradient = CGGradientCreateWithColorComponents(colorSpace, components, locations, (size_t)4);

    CGRect contextRect;
    contextRect.origin.x = 0.0f;
    contextRect.origin.y = 0.0f;
    contextRect.size = [image size];

    // Center the source image within the context.
    UIImage *itemImage = image;
    CGSize itemImageSize = [itemImage size];
    CGPoint itemImagePosition;
    itemImagePosition.x = ceilf((contextRect.size.width - itemImageSize.width) / 2);
    itemImagePosition.y = ceilf((contextRect.size.height - itemImageSize.height) / 2);

    // Scale 0.0 = main screen scale, so the result is Retina-sharp.
    UIGraphicsBeginImageContextWithOptions(contextRect.size, NO, 0.0);
    CGContextRef c = UIGraphicsGetCurrentContext();

    // Setup shadow
    CGContextSetShadowWithColor(c, shadowOffset, shadowBlur, cgShadowColor);

    // Setup transparency layer and clip to the image's alpha mask
    // (mirrored, since Quartz's coordinate system is inverted vs UIKit).
    CGContextBeginTransparencyLayer(c, NULL);
    CGContextScaleCTM(c, 1.0, -1.0);
    CGContextClipToMask(c, CGRectMake(itemImagePosition.x, -itemImagePosition.y, itemImageSize.width, -itemImageSize.height), [itemImage CGImage]);

    // Fill with the tint color, then draw the gradient on top, and end
    // the transparency layer.
    CGContextSetFillColorWithColor(c, cgColor);
    contextRect.size.height = -contextRect.size.height;
    CGContextFillRect(c, contextRect);
    CGContextDrawLinearGradient(c, colorGradient, CGPointZero, CGPointMake(contextRect.size.width * 1.0 / 4.0, contextRect.size.height), 0);
    CGContextEndTransparencyLayer(c);

    // Grab the tinted image and end the context.
    UIImage *resultImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    CGColorSpaceRelease(colorSpace);
    CGGradientRelease(colorGradient);
    return resultImage;
}
To implement this code, I've added a category to UIImage in my project, and then made the following changes to BCTab.h:
// Builds a tab from a single icon image, generating the normal and selected
// appearances in code (so only one asset per tab is needed).
// Fixed: Objective-C string literals are @"...", not #"..." (markdown-
// mangled @), and object format specifiers are %@, not %#.
- (id)initWithIconImageName:(NSString *)imageName {
    if (self = [super init]) {
        self.adjustsImageWhenHighlighted = NO;
        self.background = [UIImage imageNamed:@"BCTabBarController.bundle/tab-background.png"];
        self.rightBorder = [UIImage imageNamed:@"BCTabBarController.bundle/tab-right-border.png"];
        self.backgroundColor = [UIColor clearColor];

        // No longer needed: the -selected variant is generated below instead
        // of being loaded from a separate asset.
        // NSString *selectedName = [NSString stringWithFormat:@"%@-selected.%@",
        //                           [imageName stringByDeletingPathExtension],
        //                           [imageName pathExtension]];

        UIImage *defImage = [UIImage imageNamed:imageName];
        [self setImage:[defImage imageWithBackgroundColor:[UIColor lightGrayColor] shadeAlpha1:0.4 shadeAlpha2:0.0 shadeAlpha3:0.6 shadowColor:[UIColor blackColor] shadowOffset:CGSizeMake(0.0, -1.0f) shadowBlur:3.0] forState:UIControlStateNormal];
        [self setImage:[defImage imageWithBackgroundColor:[UIColor redColor] shadeAlpha1:0.4 shadeAlpha2:0.0 shadeAlpha3:0.6 shadowColor:[UIColor blackColor] shadowOffset:CGSizeMake(0.0, -1.0f) shadowBlur:3.0] forState:UIControlStateSelected];
    }
    return self;
}
How can I use Ephraim's code to work correctly with Retina display?
After digging around the internet, a Google search led me back to StackOverflow. I found this answer to this question, which discusses a different method that should be used to set the scale factor of the image graphics context when it is initialized.
UIGraphicsBeginImageContext(contextRect.size); needs to be changed to UIGraphicsBeginImageContextWithOptions(contextRect.size, NO, scale);, where "scale" is the
value of the scale you want to use. I grabbed it from [[UIScreen mainScreen] scale].
I am using a UIScrollView to display a UIImageView that is larger than the screen size (just like photo viewing in the Photo Library). Next, I needed to add a point (I am using a UIImageView to show a dot) to the screen when the user taps; I got that working.
Now what I am trying to do is have that point stay in the same location relative to the image. UIScrollView has some notifications for when dragging starts and ends, but nothing that gives a constant update of its contentOffset. Say I place a dot at location X; if I then scroll down 50 pixels, the point needs to move up 50 pixels smoothly (so the point always stays at the same location relative to the image).
does anybody have any ideas?
EDIT: adding code
- (void)viewDidLoad
{
    [super viewDidLoad];
    //other code...

    // Scroll view setup: clip to bounds, snappier deceleration, and zoom
    // limits sized so the image can never shrink below the view's width.
    scrollView.delegate = self;
    scrollView.clipsToBounds = YES;
    scrollView.decelerationRate = UIScrollViewDecelerationRateFast;

    CGFloat fitWidthScale = scrollView.frame.size.width / imageView.frame.size.width;
    scrollView.minimumZoomScale = fitWidthScale;
    scrollView.maximumZoomScale = 10.0;

    //other code...
}
adding a point
// Tap handler: drops a 20x20 dot image at the tap location and records it.
// Fixed: @"..." string literals (were markdown-mangled to #"...").
// NOTE(review): locationInView:self.view gives the tap in the root view's
// coordinates, but the dot is added to the scroll view — for the dot to
// track the image it should likely be locationInView:self.scrollView (or
// the zoomed image view). Confirm against the intended behavior.
-(void) foundTap:(UITapGestureRecognizer *) recognizer
{
    CGPoint pixelPos = [recognizer locationInView:self.view];
    UIImageView *testPoint = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"inner-circle.png"]];
    testPoint.frame = CGRectMake(0, 0, 20, 20);
    testPoint.center = CGPointMake(pixelPos.x, pixelPos.y);
    [self.scrollView addSubview:testPoint];

    NSMutableArray *tempTestPointArray = [[NSMutableArray alloc] initWithArray:testPointArray];
    [tempTestPointArray addObject:testPoint];
    testPointArray = tempTestPointArray;
    NSLog(@"recorded point %f,%f", pixelPos.x, pixelPos.y);
}
getting pixel points
// Logs the zoom scale and the pinch centroid (in the zoomed view and its
// superview) while the user zooms.
// Fixed: @"..." literals and %@ format specifiers (were mangled to #).
-(void)scrollViewDidZoom:(UIScrollView *)scrollView
{
    NSLog(@"zoom factor: %f", scrollView.zoomScale);
    UIPinchGestureRecognizer *test = scrollView.pinchGestureRecognizer;
    UIView *piece = test.view;
    CGPoint locationInView = [test locationInView:piece];
    CGPoint locationInSuperview = [test locationInView:piece.superview];
    NSLog(@"locationInView: %@ locationInSuperView: %@", NSStringFromCGPoint(locationInView), NSStringFromCGPoint(locationInSuperview));
}
You can implement the UIScrollViewDelegate method scrollViewDidScroll: to get a continuous callback whenever the content offset changes.
Can you not simply add the image view with the dot as a subview of the scroll view?
You can use something like this:
// Redraws mainimg's image with a red crosshair cursor at pt.
// NOTE(review): assumes pt, largecursor, maxx and maxy are defined
// elsewhere; maxx/maxy presumably span the image bounds — confirm.
UIGraphicsBeginImageContext(mainimg.frame.size);
[mainimg.image drawInRect:
CGRectMake(0, 0, mainimg.frame.size.width, mainimg.frame.size.height)];
CGContextRef ctx = UIGraphicsGetCurrentContext();
CGContextSetLineCap(ctx, kCGLineCapButt);
CGContextSetLineWidth(ctx,2.0);
// Fully opaque red stroke.
CGContextSetRGBStrokeColor(ctx, 1.0, 0.0, 0.0, 1.0);
CGContextBeginPath(ctx);
if (largecursor == FALSE) {
// Small cursor: a 6pt cross centered on pt.
CGContextMoveToPoint(ctx, pt.x-3.0, pt.y);
CGContextAddLineToPoint(ctx, pt.x+3.0,pt.y);
CGContextMoveToPoint(ctx, pt.x, pt.y-3.0);
CGContextAddLineToPoint(ctx, pt.x, pt.y+3.0);
} else {
// Large cursor: full-width and full-height lines through pt.
CGContextMoveToPoint(ctx, 0.0, pt.y);
CGContextAddLineToPoint(ctx, maxx,pt.y);
CGContextMoveToPoint(ctx, pt.x, 0.0);
CGContextAddLineToPoint(ctx, pt.x, maxy);
}
CGContextStrokePath(ctx);
mainimg.image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();