How do I take a cropped screenshot with Retina image quality in my snapshot implementation in Swift?

I am trying to take a screenshot of my UIView, crop it, and save it to my photo library. As I am trying to do this, there are 3 problems:
(1) I want to take the screenshot with the blur in it, as the blur filter never gets saved in the screenshot.
(2) The image quality is very low.
(3) I am not able to crop the image.
This is my code:
@IBAction func Screenshot(_ sender: UIButton) {
// Declare the snapshot boundaries
let top: CGFloat = 70
let bottom: CGFloat = 400
// The size of the cropped image
let size = CGSize(width: view.frame.size.width, height: view.frame.size.height - top - bottom)
// Start the context
UIGraphicsBeginImageContext(size)
// we are going to use context in a couple of places
let context = UIGraphicsGetCurrentContext()!
// Transform the context so that anything drawn into it is displaced "top" pixels up
// Something drawn at coordinate (0, 0) will now be drawn at (0, -top)
// This will result in the "top" pixels being cut off
// The bottom pixels are cut off because of the size of the context
context.translateBy(x: 0, y: -top)
// Draw the view into the context (this is the snapshot)
view.drawHierarchy(in: view.bounds, afterScreenUpdates: true)
let snapshot = UIGraphicsGetImageFromCurrentImageContext()
// End the context (this is required to not leak resources)
UIGraphicsEndImageContext()
// Save to photos
UIImageWriteToSavedPhotosAlbum(snapshot!, nil, nil, nil)
}

You said:
I want to take Screenshot with Blur in it, As blur filter never gets saved in the screenshot.
I wonder if the view being snapshotted might not be the one with the UIVisualEffectView as a subview, because when I use the code at the end of this answer, the blur effect (and the impact of changing fractionComplete) is captured.
The image quality is very low.
If you use UIGraphicsBeginImageContextWithOptions with a scale of zero, it should capture the image at the resolution of the device:
UIGraphicsBeginImageContextWithOptions(size, isOpaque, 0)
I am not able to crop the image.
I personally capture the whole view, and then crop as needed. See UIView extension below.
In Swift 3:
class ViewController: UIViewController {
var animator: UIViewPropertyAnimator?
@IBOutlet weak var imageView: UIImageView!
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
let blur = UIBlurEffect(style: .light)
let effectView = UIVisualEffectView(effect: blur)
view.addSubview(effectView)
effectView.translatesAutoresizingMaskIntoConstraints = false
NSLayoutConstraint.activate([
effectView.leadingAnchor.constraint(equalTo: imageView.leadingAnchor),
effectView.trailingAnchor.constraint(equalTo: imageView.trailingAnchor),
effectView.topAnchor.constraint(equalTo: imageView.topAnchor),
effectView.bottomAnchor.constraint(equalTo: imageView.bottomAnchor)
])
animator = UIViewPropertyAnimator(duration: 0, curve: .linear) { effectView.effect = nil }
}
#IBAction func didChangeValueForSlider(_ sender: UISlider) {
animator?.fractionComplete = CGFloat(sender.value)
}
#IBAction func didTapSnapshotButton(_ sender: AnyObject) {
if let snapshot = view.snapshot(of: imageView.frame) {
UIImageWriteToSavedPhotosAlbum(snapshot, nil, nil, nil)
}
}
}
extension UIView {
/// Create snapshot
///
/// - parameter rect: The `CGRect` of the portion of the view to return. If `nil` (or omitted),
/// return snapshot of the whole view.
///
/// - returns: Returns `UIImage` of the specified portion of the view.
func snapshot(of rect: CGRect? = nil) -> UIImage? {
// snapshot entire view
UIGraphicsBeginImageContextWithOptions(bounds.size, isOpaque, 0)
drawHierarchy(in: bounds, afterScreenUpdates: true)
let wholeImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
// if no `rect` provided, return image of whole view
guard let image = wholeImage, let rect = rect else { return wholeImage }
// otherwise, grab specified `rect` of image
let scale = image.scale
let scaledRect = CGRect(x: rect.origin.x * scale, y: rect.origin.y * scale, width: rect.size.width * scale, height: rect.size.height * scale)
guard let cgImage = image.cgImage?.cropping(to: scaledRect) else { return nil }
return UIImage(cgImage: cgImage, scale: scale, orientation: .up)
}
}
Or in Swift 2:
class ViewController: UIViewController {
var animator: UIViewPropertyAnimator?
@IBOutlet weak var imageView: UIImageView!
override func viewDidAppear(animated: Bool) {
super.viewDidAppear(animated)
let blur = UIBlurEffect(style: .Light)
let effectView = UIVisualEffectView(effect: blur)
view.addSubview(effectView)
effectView.translatesAutoresizingMaskIntoConstraints = false
NSLayoutConstraint.activateConstraints([
effectView.leadingAnchor.constraintEqualToAnchor(imageView.leadingAnchor),
effectView.trailingAnchor.constraintEqualToAnchor(imageView.trailingAnchor),
effectView.topAnchor.constraintEqualToAnchor(imageView.topAnchor),
effectView.bottomAnchor.constraintEqualToAnchor(imageView.bottomAnchor)
])
animator = UIViewPropertyAnimator(duration: 0, curve: .Linear) { effectView.effect = nil }
}
#IBAction func didChangeValueForSlider(sender: UISlider) {
animator?.fractionComplete = CGFloat(sender.value)
}
#IBAction func didTapSnapshotButton(sender: AnyObject) {
if let snapshot = view.snapshot(of: imageView.frame) {
UIImageWriteToSavedPhotosAlbum(snapshot, nil, nil, nil)
}
}
}
extension UIView {
/// Create snapshot
///
/// - parameter rect: The `CGRect` of the portion of the view to return. If `nil` (or omitted),
/// return snapshot of the whole view.
///
/// - returns: Returns `UIImage` of the specified portion of the view.
func snapshot(of rect: CGRect? = nil) -> UIImage? {
// snapshot entire view
UIGraphicsBeginImageContextWithOptions(bounds.size, opaque, 0)
drawViewHierarchyInRect(bounds, afterScreenUpdates: true)
let wholeImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
// if no `rect` provided, return image of whole view
guard let rect = rect, let image = wholeImage else { return wholeImage }
// otherwise, grab specified `rect` of image
let scale = image.scale
let scaledRect = CGRect(x: rect.origin.x * scale, y: rect.origin.y * scale, width: rect.size.width * scale, height: rect.size.height * scale)
guard let cgImage = CGImageCreateWithImageInRect(image.CGImage!, scaledRect) else { return nil }
return UIImage(CGImage: cgImage, scale: scale, orientation: .Up)
}
}
So, when I capture four images at four different slider positions, that yields:

I am not able to crop the image in the right way: the navigation bar and status bar show up with a blank (white) background. (The rest of the image crops well.)
Here is the code:
let top: CGFloat = 70
let bottom: CGFloat = 280
// The size of the cropped image
let size = CGSize(width: view.frame.size.width, height: view.frame.size.height - top - bottom)
// Start the context
UIGraphicsBeginImageContext(size)
// we are going to use context in a couple of places
let context = UIGraphicsGetCurrentContext()!
// NOTE: this translation is a no-op (y would need to be -top to shift
// the snapshot up), so the top region is never moved out of the crop
context.translateBy(x: 0, y: 0)
// Draw the view into the context (this is the snapshot)
// NOTE: this begins a second context on top of the first one,
// so the translation above never applies to the drawing below
UIGraphicsBeginImageContextWithOptions(size, view.isOpaque, 0)
self.view.drawHierarchy(in: view.bounds, afterScreenUpdates: true)
let snapshot = UIGraphicsGetImageFromCurrentImageContext()
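For reference, a minimal corrected sketch, assuming the snapshot(of:) extension from the answer above: capture the whole view at device scale and crop the desired rect, instead of nesting two image contexts:
let top: CGFloat = 70
let bottom: CGFloat = 280
// The region between the top and bottom margins, in the view's coordinates
let rect = CGRect(x: 0,
                  y: top,
                  width: view.frame.width,
                  height: view.frame.height - top - bottom)
if let snapshot = view.snapshot(of: rect) {
    UIImageWriteToSavedPhotosAlbum(snapshot, nil, nil, nil)
}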

Related

Presenting modal in iOS 13 with custom height

In iOS 13 there is a new behaviour for modal view controllers when being presented. I found that the built-in Photos app presents a smaller modal view controller.
How can I present a view controller with a custom size like this, one that can slide up to a larger height?
(Screenshots from the system Photos app.)
Yes, it is possible to present a modal in iOS 13 with a custom height.
You just need to add the code below to the presented view controller:
override func updateViewConstraints() {
self.view.frame.size.height = UIScreen.main.bounds.height - 150
self.view.frame.origin.y = 150
self.view.roundCorners(corners: [.topLeft, .topRight], radius: 10.0)
super.updateViewConstraints()
}
extension UIView {
func roundCorners(corners: UIRectCorner, radius: CGFloat) {
let path = UIBezierPath(roundedRect: bounds, byRoundingCorners: corners, cornerRadii: CGSize(width: radius, height: radius))
let mask = CAShapeLayer()
mask.path = path.cgPath
layer.mask = mask
}
}
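A hedged usage sketch (the controller name is assumed): with iOS 13's default sheet presentation you simply present the controller, and the updateViewConstraints override above resizes and rounds it:
let sheetVC = SheetViewController() // hypothetical controller containing the override above
sheetVC.modalPresentationStyle = .pageSheet // the iOS 13 card-style default
present(sheetVC, animated: true, completion: nil)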
Answer in Swift
I was looking for a way to replicate that type of view controller behaviour, albeit with basic UI, and have found a rather simple solution.
Basically, you create a view controller (CardViewController) with a transparent background and then add to it a card-like view with a UIPanGestureRecognizer, which will enable you to drag it around and dismiss it along with the view controller.
To present it you simply call present, setting the modalPresentationStyle to .overCurrentContext and modalTransitionStyle to .coverVertical:
let cardVC = CardViewController()
cardVC.modalPresentationStyle = .overCurrentContext
cardVC.modalTransitionStyle = .coverVertical
present(cardVC, animated: true, completion: nil)
Then, in CardViewController, which can be created programmatically or using Interface Builder, you add a UIPanGestureRecognizer to your card view (contentView):
let panGestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(handleDismiss(recognizer:)))
panGestureRecognizer.cancelsTouchesInView = false
contentView.addGestureRecognizer(panGestureRecognizer)
Then just add an @objc function that will respond to the UIPanGestureRecognizer. The viewTranslation and swipeThreshold properties it relies on are declared here and explained below:
var viewTranslation = CGPoint.zero
let swipeThreshold: CGFloat = 200

@objc
func handleDismiss(recognizer: UIPanGestureRecognizer) {
switch recognizer.state {
case .changed:
viewTranslation = recognizer.translation(in: view)
UIView.animate(withDuration: 0.5, delay: 0, usingSpringWithDamping: 0.7, initialSpringVelocity: 1, options: .curveEaseOut, animations: {
guard self.viewTranslation.y > 0 else {return}
self.view.transform = CGAffineTransform(translationX: 0, y: self.viewTranslation.y)
})
case .ended:
if viewTranslation.y < swipeThreshold {
UIView.animate(withDuration: 0.5, delay: 0, usingSpringWithDamping: 0.7, initialSpringVelocity: 1, options: .curveEaseOut, animations: {
self.view.transform = .identity
})
} else {
dismiss(animated: true, completion: nil)
}
default:
break
}
}
The swipeThreshold is a CGFloat variable with a value of your choosing (200 works great for me); if the UIPanGestureRecognizer's y translation exceeds it, the dismissal of the view controller is triggered along with all its elements.
Likewise, you can add a simple button that dismisses the view controller on .touchUpInside by calling dismiss(), as sketched below.
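A minimal sketch of that button (names assumed, not from the original answer):
// In CardViewController's setup, e.g. viewDidLoad:
let dismissButton = UIButton(type: .system)
dismissButton.setTitle("Close", for: .normal)
dismissButton.addTarget(self, action: #selector(didTapDismiss), for: .touchUpInside)
contentView.addSubview(dismissButton)

// Elsewhere in CardViewController:
@objc func didTapDismiss() {
    dismiss(animated: true, completion: nil)
}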
If you want, you can have a look at this repo, in which I have a sample project that exhibits this behaviour. That way you can build your own totally customisable cards.

Animate a non-UI property in Objective-C (Mac OS X) [duplicate]

On UIView you can change the backgroundColor animated, and on a UISlider you can change the value animated. Can you add a custom property to your own UIView subclass so that it can be animated?
If I have a CGPath within my UIView, I can animate the drawing of it by changing the percentage drawn of the path. Can I encapsulate that animation into the subclass?
I.e. I have a UIView with a CGPath that creates a circle. If the circle is not there it represents 0%; if the circle is full it represents 100%. I can draw any value by changing the percentage drawn of the path. I can also animate the change (within the UIView subclass) by animating the percentage of the CGPath and redrawing the path.
Can I set some property (i.e. percentage) on the UIView so that I can stick the change into a UIView animateWithDuration block and have it animate the change of the percentage of the path?
I hope I have explained what I would like to do well. Essentially, all I want to do is something like...
[UIView animateWithDuration:1.0
animations:^{
myCircleView.percentage = 0.7;
}
completion:nil];
and the circle path animate to the given percentage.
If you extend CALayer and implement your custom
- (void) drawInContext:(CGContextRef) context
you can make an animatable property by overriding needsDisplayForKey (in your custom CALayer class) like this:
+ (BOOL) needsDisplayForKey:(NSString *) key {
if ([key isEqualToString:@"percentage"]) {
return YES;
}
return [super needsDisplayForKey:key];
}
Of course, you also need to have a @property called percentage. From now on you can animate the percentage property using Core Animation. I did not check whether it works using the [UIView animateWithDuration...] call as well. It might work. But this worked for me:
CABasicAnimation *animation = [CABasicAnimation animationWithKeyPath:@"percentage"];
animation.duration = 1.0;
animation.fromValue = [NSNumber numberWithDouble:0];
animation.toValue = [NSNumber numberWithDouble:100];
[myCustomLayer addAnimation:animation forKey:@"animatePercentage"];
Oh and to use yourCustomLayer with myCircleView, do this:
[myCircleView.layer addSublayer:myCustomLayer];
Complete Swift 3 example:
public class CircularProgressView: UIView {
public dynamic var progress: CGFloat = 0 {
didSet {
progressLayer.progress = progress
}
}
fileprivate var progressLayer: CircularProgressLayer {
return layer as! CircularProgressLayer
}
override public class var layerClass: AnyClass {
return CircularProgressLayer.self
}
override public func action(for layer: CALayer, forKey event: String) -> CAAction? {
if event == #keyPath(CircularProgressLayer.progress),
let action = action(for: layer, forKey: #keyPath(backgroundColor)) as? CAAnimation,
let animation: CABasicAnimation = (action.copy() as? CABasicAnimation) {
animation.keyPath = #keyPath(CircularProgressLayer.progress)
animation.fromValue = progressLayer.progress
animation.toValue = progress
self.layer.add(animation, forKey: #keyPath(CircularProgressLayer.progress))
return animation
}
return super.action(for: layer, forKey: event)
}
}
/*
* Concepts taken from:
* https://stackoverflow.com/a/37470079
*/
fileprivate class CircularProgressLayer: CALayer {
@NSManaged var progress: CGFloat
let startAngle: CGFloat = 1.5 * .pi
let twoPi: CGFloat = 2 * .pi
let halfPi: CGFloat = .pi / 2
override class func needsDisplay(forKey key: String) -> Bool {
if key == #keyPath(progress) {
return true
}
return super.needsDisplay(forKey: key)
}
override func draw(in ctx: CGContext) {
super.draw(in: ctx)
UIGraphicsPushContext(ctx)
//Light Grey
UIColor.lightGray.setStroke()
let center = CGPoint(x: bounds.midX, y: bounds.midY)
let strokeWidth: CGFloat = 4
let radius = (bounds.size.width / 2) - strokeWidth
let path = UIBezierPath(arcCenter: center, radius: radius, startAngle: 0, endAngle: twoPi, clockwise: true)
path.lineWidth = strokeWidth
path.stroke()
//Red
UIColor.red.setStroke()
let endAngle = (twoPi * progress) - halfPi
let pathProgress = UIBezierPath(arcCenter: center, radius: radius, startAngle: startAngle, endAngle: endAngle , clockwise: true)
pathProgress.lineWidth = strokeWidth
pathProgress.lineCapStyle = .round
pathProgress.stroke()
UIGraphicsPopContext()
}
}
let circularProgress = CircularProgressView(frame: CGRect(x: 0, y: 0, width: 80, height: 80))
UIView.animate(withDuration: 2, delay: 0, options: .curveEaseInOut, animations: {
circularProgress.progress = 0.76
}, completion: nil)
There is a great objc article here which goes into detail about how this works, as well as an objc project that uses the same concepts here.
Essentially, action(for:forKey:) will be called when an object is being animated from an animation block; we can start our own animations with the same properties (stolen from the backgroundColor property) and animate the changes.
For those who need more details on that, like I did: there is a cool example from Apple covering this question.
E.g. thanks to it I found that you don't actually need to add your custom layer as a sublayer (as @Tom van Zummeren suggests). Instead, it's enough to add a class method to your View class:
+ (Class)layerClass
{
return [CustomLayer class];
}
Hope it helps somebody.
You will have to implement the percentage part yourself. You can override the layer drawing code to draw your CGPath according to the set percentage value. Check out the Core Animation Programming Guide and the Animation Types and Timing Guide.
@David Rees' answer got me on the right track, but there is one issue: in my case
the completion of the animation always returns false, right after the animation has begun.
UIView.animate(withDuration: 2, delay: 0, options: .curveEaseInOut, animations: {
circularProgress.progress = 0.76
}, completion: { finished in
// finished - always false
})
This is the way that worked for me: the action of the animation is handled inside the CALayer.
I have also included a small example of how to make a layer with additional properties like "color".
In this case, without an initializer that copies the values, changing the color would take effect only on the non-animating view. During the animation it would be visible with the "default setting".
public class CircularProgressView: UIView {
@objc public dynamic var progress: CGFloat {
get {
return progressLayer.progress
}
set {
progressLayer.progress = newValue
}
}
fileprivate var progressLayer: CircularProgressLayer {
return layer as! CircularProgressLayer
}
override public class var layerClass: AnyClass {
return CircularProgressLayer.self
}
}
/*
* Concepts taken from:
* https://stackoverflow.com/a/37470079
*/
fileprivate class CircularProgressLayer: CALayer {
@NSManaged var progress: CGFloat
let startAngle: CGFloat = 1.5 * .pi
let twoPi: CGFloat = 2 * .pi
let halfPi: CGFloat = .pi / 2
var color: UIColor = .red
// preserve layer properties
// without this specific init, if the color was changed to something else,
// the animation would still use .red
override init(layer: Any) {
super.init(layer: layer)
if let layer = layer as? CircularProgressLayer {
self.color = layer.color
self.progress = layer.progress
}
}
override init() {
super.init()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
}
override class func needsDisplay(forKey key: String) -> Bool {
if key == #keyPath(progress) {
return true
}
return super.needsDisplay(forKey: key)
}
override func action(forKey event: String) -> CAAction? {
if event == #keyPath(CircularProgressLayer.progress) {
guard let animation = action(forKey: #keyPath(backgroundColor)) as? CABasicAnimation else {
setNeedsDisplay()
return nil
}
if let presentation = presentation() {
animation.keyPath = event
animation.fromValue = presentation.value(forKeyPath: event)
animation.toValue = nil
} else {
return nil
}
return animation
}
return super.action(forKey: event)
}
override func draw(in ctx: CGContext) {
super.draw(in: ctx)
UIGraphicsPushContext(ctx)
//Light Gray
UIColor.lightGray.setStroke()
let center = CGPoint(x: bounds.midX, y: bounds.midY)
let strokeWidth: CGFloat = 4
let radius = (bounds.size.width / 2) - strokeWidth
let path = UIBezierPath(arcCenter: center, radius: radius, startAngle: 0, endAngle: twoPi, clockwise: true)
path.lineWidth = strokeWidth
path.stroke()
// Red - default
self.color.setStroke()
let endAngle = (twoPi * progress) - halfPi
let pathProgress = UIBezierPath(arcCenter: center, radius: radius, startAngle: startAngle, endAngle: endAngle , clockwise: true)
pathProgress.lineWidth = strokeWidth
pathProgress.lineCapStyle = .round
pathProgress.stroke()
UIGraphicsPopContext()
}
}
I found the way to handle animations differently and to copy layer properties in this article:
https://medium.com/better-programming/make-apis-like-apple-animatable-view-properties-in-swift-4349b2244cea
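With the layer handling its own action as above, the same animation block can be used, and per this answer's description the completion flag now reflects the actual end of the animation:
UIView.animate(withDuration: 2, delay: 0, options: .curveEaseInOut, animations: {
    circularProgress.progress = 0.76
}, completion: { finished in
    // finished is now true once the layer animation actually completes
})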

Stitch/Composite multiple images vertically and save as one image (iOS, objective-c)

I need help writing an Objective-C function that will take in an array of UIImages/PNGs and return/save one tall image of all the images stitched together in order vertically. I am new to this, so please go slow and take it easy :)
My ideas so far:
Draw a UIView, then add each of the images as subviews,
and then ???
Below are Swift 3 and Swift 2 examples that stitch images together vertically or horizontally. They use the dimensions of the largest image in the array provided by the caller to determine the common size of the frame that each individual image is stitched into.
Note: The Swift 3 example preserves each image's aspect ratio, while the Swift 2 example does not. See the note inline below regarding that.
UPDATE: Added Swift 3 example
Swift 3:
import UIKit
import AVFoundation
func stitchImages(images: [UIImage], isVertical: Bool) -> UIImage {
var stitchedImages : UIImage!
if images.count > 0 {
var maxWidth = CGFloat(0), maxHeight = CGFloat(0)
for image in images {
if image.size.width > maxWidth {
maxWidth = image.size.width
}
if image.size.height > maxHeight {
maxHeight = image.size.height
}
}
var totalSize : CGSize
let maxSize = CGSize(width: maxWidth, height: maxHeight)
if isVertical {
totalSize = CGSize(width: maxSize.width, height: maxSize.height * (CGFloat)(images.count))
} else {
totalSize = CGSize(width: maxSize.width * (CGFloat)(images.count), height: maxSize.height)
}
UIGraphicsBeginImageContext(totalSize)
for image in images {
let offset = (CGFloat)(images.index(of: image)!)
let rect = AVMakeRect(aspectRatio: image.size, insideRect: isVertical ?
CGRect(x: 0, y: maxSize.height * offset, width: maxSize.width, height: maxSize.height) :
CGRect(x: maxSize.width * offset, y: 0, width: maxSize.width, height: maxSize.height))
image.draw(in: rect)
}
stitchedImages = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
}
return stitchedImages
}
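For example, a usage sketch with two hypothetical images already loaded:
let firstImage = UIImage(named: "first")!   // hypothetical asset names
let secondImage = UIImage(named: "second")!
let stitched = stitchImages(images: [firstImage, secondImage], isVertical: true)
UIImageWriteToSavedPhotosAlbum(stitched, nil, nil, nil)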
Note: The original Swift 2 example below does not preserve the aspect ratio (in the Swift 2 example all images are expanded to fit in the bounding box that represents the extrema of the widths and heights of the images, thus any non-square image can be stretched disproportionately in one of its dimensions). If you're using Swift 2 and want to preserve the aspect ratio, please use the AVMakeRect() modification from the Swift 3 example. Since I no longer have access to a Swift 2 playground and can't test it to ensure no errors, I'm not updating the Swift 2 example here for that.
Swift 2: (Doesn't preserve aspect ratio. Fixed in Swift 3 example above)
import UIKit
import AVFoundation
func stitchImages(images: [UIImage], isVertical: Bool) -> UIImage {
var stitchedImages : UIImage!
if images.count > 0 {
var maxWidth = CGFloat(0), maxHeight = CGFloat(0)
for image in images {
if image.size.width > maxWidth {
maxWidth = image.size.width
}
if image.size.height > maxHeight {
maxHeight = image.size.height
}
}
var totalSize : CGSize, maxSize = CGSizeMake(maxWidth, maxHeight)
if isVertical {
totalSize = CGSizeMake(maxSize.width, maxSize.height * (CGFloat)(images.count))
} else {
totalSize = CGSizeMake(maxSize.width * (CGFloat)(images.count), maxSize.height)
}
UIGraphicsBeginImageContext(totalSize)
for image in images {
var rect : CGRect, offset = (CGFloat)((images as NSArray).indexOfObject(image))
if isVertical {
rect = CGRectMake(0, maxSize.height * offset, maxSize.width, maxSize.height)
} else {
rect = CGRectMake(maxSize.width * offset, 0 , maxSize.width, maxSize.height)
}
image.drawInRect(rect)
}
stitchedImages = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
}
return stitchedImages
}
The normal way is to create a bitmap image context, draw your images into it at the required positions, and then get the image from the image context.
You can do this with UIKit, which is somewhat easier, but isn't thread safe, so it will need to run on the main thread and will block the UI.
There is loads of example code around for this, but if you want to understand it properly, you should look at UIGraphicsBeginImageContext, UIGraphicsGetCurrentContext, UIGraphicsGetImageFromCurrentImageContext and UIImage's drawInRect:. Don't forget UIGraphicsEndImageContext.
You can also do this with Core Graphics, which is, AFAIK, safe to use on background threads (I've not had a crash from it yet). Efficiency is about the same, as UIKit just uses CG under the hood.
Key words for this are CGBitmapContextCreate, CGContextDrawImage, CGBitmapContextCreateImage, CGContextTranslateCTM, CGContextScaleCTM and CGContextRelease (no ARC for Core Graphics). The scaling and translating are needed because CG has its origin in the bottom left corner and Y increases upwards.
There is also a third way: use CG for the context, but save yourself all the coordinate pain by using a CALayer. Set your CGImage (UIImage.CGImage) as the contents and then render the layer to the context. This is still thread safe and lets the layer take care of all the transformations. The keyword for this is renderInContext:.
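To make the Core Graphics route concrete, here is a minimal Swift sketch of that approach (a simplified vertical stitch of equally sized tiles under assumed sizes, not code from this answer). The bottom-up drawing handles the coordinate flip mentioned above:
import UIKit

// Stitches equally sized tiles vertically with a CGBitmapContext.
// Safe off the main thread, unlike the UIGraphics* calls.
func stitchWithCoreGraphics(_ images: [CGImage], tileSize: CGSize) -> CGImage? {
    let totalHeight = tileSize.height * CGFloat(images.count)
    guard let context = CGContext(data: nil,
                                  width: Int(tileSize.width),
                                  height: Int(totalHeight),
                                  bitsPerComponent: 8,
                                  bytesPerRow: 0,
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else { return nil }
    for (index, image) in images.enumerated() {
        // CG's origin is bottom left and Y increases upwards,
        // so place the first image at the top by working downwards
        let rect = CGRect(x: 0,
                          y: totalHeight - tileSize.height * CGFloat(index + 1),
                          width: tileSize.width,
                          height: tileSize.height)
        context.draw(image, in: rect)
    }
    return context.makeImage()
}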
I know I'm a bit late here, but hopefully this can help someone out. If you're trying to create one large image out of an array, you can use this method:
- (UIImage *)mergeImagesFromArray: (NSArray *)imageArray {
if ([imageArray count] == 0) return nil;
UIImage *exampleImage = [imageArray firstObject];
CGSize imageSize = exampleImage.size;
CGSize finalSize = CGSizeMake(imageSize.width, imageSize.height * [imageArray count]);
UIGraphicsBeginImageContext(finalSize);
for (UIImage *image in imageArray) {
[image drawInRect: CGRectMake(0, imageSize.height * [imageArray indexOfObject: image],
imageSize.width, imageSize.height)];
}
UIImage *finalImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return finalImage;
}
Swift 4
extension Array where Element: UIImage {
func stitchImages(isVertical: Bool) -> UIImage {
let maxWidth = self.compactMap { $0.size.width }.max()
let maxHeight = self.compactMap { $0.size.height }.max()
let maxSize = CGSize(width: maxWidth ?? 0, height: maxHeight ?? 0)
let totalSize = isVertical ?
CGSize(width: maxSize.width, height: maxSize.height * (CGFloat)(self.count))
: CGSize(width: maxSize.width * (CGFloat)(self.count), height: maxSize.height)
let renderer = UIGraphicsImageRenderer(size: totalSize)
return renderer.image { (context) in
for (index, image) in self.enumerated() {
let rect = AVMakeRect(aspectRatio: image.size, insideRect: isVertical ?
CGRect(x: 0, y: maxSize.height * CGFloat(index), width: maxSize.width, height: maxSize.height) :
CGRect(x: maxSize.width * CGFloat(index), y: 0, width: maxSize.width, height: maxSize.height))
image.draw(in: rect)
}
}
}
}
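Usage is then a one-liner on any array of images (the image names here are assumed):
let stitched = [topImage, bottomImage].stitchImages(isVertical: true)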
Try this piece of code. I tried stitching two images together and displayed them in an image view:
UIImage *bottomImage = [UIImage imageNamed:@"bottom.png"]; //first image
UIImage *image = [UIImage imageNamed:@"top.png"]; //foreground image
CGSize newSize = CGSizeMake(209, 260); //size of image view
UIGraphicsBeginImageContext( newSize );
// drawing 1st image
[bottomImage drawInRect:CGRectMake(0,0,newSize.width/2,newSize.height/2)];
// drawing the 2nd image after the 1st
[image drawInRect:CGRectMake(0,newSize.height/2,newSize.width/2,newSize.height/2)] ;
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
join.image = newImage;
join is the name of the image view, and you get to see the two images as a single image.
The solutions above were helpful but had a serious flaw for me: if the images are of different sizes, the resulting stitched image could have large spaces between the parts. The solution I came up with combines all images directly below each other, so that it looks more like a single image no matter the individual image sizes.
For Swift 3.x
static func combineImages(images:[UIImage]) -> UIImage
{
var maxHeight:CGFloat = 0.0
var maxWidth:CGFloat = 0.0
for image in images
{
maxHeight += image.size.height
if image.size.width > maxWidth
{
maxWidth = image.size.width
}
}
let finalSize = CGSize(width: maxWidth, height: maxHeight)
UIGraphicsBeginImageContext(finalSize)
var runningHeight: CGFloat = 0.0
for image in images
{
image.draw(in: CGRect(x: 0.0, y: runningHeight, width: image.size.width, height: image.size.height))
runningHeight += image.size.height
}
let finalImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return finalImage!
}

Flatten CALayer sublayers into one layer

In my app I have one root layer and many images which are sublayers of rootLayer. I'd like to flatten all the sublayers of rootLayer into one layer/image that doesn't have any sublayers. I think I should do this by drawing all the sublayers into a Core Graphics context, but I don't know how to do that.
I hope you'll understand me, and sorry for my English.
From your own example for Mac OS X:
CGContextRef myBitmapContext = MyCreateBitmapContext(800,600);
[rootLayer renderInContext:myBitmapContext];
CGImageRef myImage = CGBitmapContextCreateImage(myBitmapContext);
rootLayer.contents = (id) myImage;
rootLayer.sublayers = nil;
CGImageRelease(myImage);
iOS:
UIGraphicsBeginImageContextWithOptions(rootLayer.bounds.size, NO, 0.0);
[rootLayer renderInContext: UIGraphicsGetCurrentContext()];
UIImage *layerImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
rootLayer.contents = (id) layerImage.CGImage;
rootLayer.sublayers = nil;
Also note the caveat in the docs:
The Mac OS X v10.5 implementation of this method does not support the entire Core Animation composition model. QCCompositionLayer, CAOpenGLLayer, and QTMovieLayer layers are not rendered. Additionally, layers that use 3D transforms are not rendered, nor are layers that specify backgroundFilters, filters, compositingFilter, or a mask values.
Do you still want the layer to be interactive? If not, call -renderInContext: and show the bitmap context.
I also once considered using render(in: CGContext).
However, I decided not to after reading this:
Important
The OS X v10.5 implementation of this method does not support the entire Core Animation composition model. QCCompositionLayer, CAOpenGLLayer, and QTMovieLayer layers are not rendered. Additionally, layers that use 3D transforms are not rendered, nor are layers that specify backgroundFilters, filters, compositingFilter, or a mask values. Future versions of macOS may add support for rendering these layers and properties.
Seriously, no masks? I say, "WORK, Apple."
That caveat is too huge to ignore, so I had to find a different approach.
solution
extension CALayer {
func flatten(in size: CGSize) -> CALayer {
let flattenedLayer = CALayer(in: size)
flattenedLayer.contents = getCGImage(in: size)
return flattenedLayer
}
}
It converts the CALayer into a CGImage and makes a new CALayer with the flattened image as its contents.
Of course, I had to make getCGImage(in: CGSize) as well, using CARenderer.
implementation
import AppKit
import Metal
let device = MTLCreateSystemDefaultDevice()!
let context = CIContext()
var textureDescriptor: MTLTextureDescriptor = {
let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: 0, height: 0, mipmapped: false)
textureDescriptor.usage = [MTLTextureUsage.shaderRead, .shaderWrite, .renderTarget]
return textureDescriptor
}()
let maxScaleFactor = NSScreen.screens.reduce(1) { max($0, $1.backingScaleFactor) }
let scaleTransform = CATransform3DMakeScale(maxScaleFactor, maxScaleFactor, 1)
let colorSpace = CGColorSpace(name: CGColorSpace.sRGB)!
extension CALayer {
convenience init(in size: CGSize, with sublayer: CALayer? = nil) {
self.init()
frame.size = size
if let sublayer { addSublayer(sublayer) }
}
func getCGImage(in size: CGSize) -> CGImage {
let ciImage = getCIImage(in: size)
return context.createCGImage(ciImage, from: ciImage.extent)!
}
func getCIImage(in size: CGSize) -> CIImage {
let superlayer = self.superlayer
let ciImage = CALayer(in: size, with: self).ciImage
if let superlayer { superlayer.addSublayer(self) }
return ciImage
}
var ciImage: CIImage {
let width = frame.size.width * maxScaleFactor
let height = frame.size.height * maxScaleFactor
textureDescriptor.width = Int(width)
textureDescriptor.height = Int(height)
let texture: MTLTexture = device.makeTexture(descriptor: textureDescriptor)!
let renderer = CARenderer(mtlTexture: texture)
transform = scaleTransform
frame = .init(origin: .zero, size: .init(width: width, height: height))
renderer.bounds = frame
renderer.layer = self
CATransaction.flush()
CATransaction.commit()
renderer.beginFrame(atTime: 0, timeStamp: nil)
renderer.render()
renderer.endFrame()
return CIImage(mtlTexture: texture, options: [.colorSpace: colorSpace])!
}
}
There are a few things to note:
use of CATransform3D to match the resolution of HiDPI displays
the layer is temporarily added as a new CALayer's sublayer in getCIImage(in:), for layers like CAShapeLayer with a zero frame size; this also makes the image independent of the layer's frame origin/position
use of CATransaction.flush() to transfer ownership of the layer tree to the CARenderer's context
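Tying it back to the original question, a minimal usage sketch (rootLayer assumed):
// Replace the live layer tree with a single flattened layer's contents
let flattened = rootLayer.flatten(in: rootLayer.bounds.size)
rootLayer.sublayers = nil
rootLayer.contents = flattened.contents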
extras
Since CALayer can have NSImage as its contents too in macOS 10.6 and later, you can change the function accordingly.
extension CALayer {
func getNSImage(in size: CGSize) -> NSImage {
let ciImage = getCIImage(in: size)
return NSImage(data: ciImage.pngData)!
}
}
extension CIImage {
var pngData: Data { context.pngRepresentation(of: self, format: .RGBA8, colorSpace: colorSpace!)! }
// jpeg, tiff, etc. can be created the same way
}
Coming this far, saving as an image is not hard at all,
but using write...Representation(of: CIImage, ...) is probably the easiest way:
extension CALayer {
func saveAsPNG(at url: URL, in size: CGSize? = nil) {
let image = size == nil ? ciImage : getCIImage(in: size!)
return try! context.writePNGRepresentation(of: image, to: url, format: .RGBA8, colorSpace: colorSpace)
}
// jpeg, tiff, etc. can be saved the same way
}

How to implement Magnifier [duplicate]

I would like to be able to create a movable magnifier (like the one you have when you copy and paste) in a custom view, for zooming into a part of my view.
I have no idea how to start; do you have any ideas?
Thanks in advance for your help :)
We do this in Crosswords. In your drawRect method, mask off a circle (using a monochrome bitmap containing the 'mask' of your magnifying glass) and draw your subject view in there with a 2x scale transform. Then draw a magnifying glass image over that and you're done.
- (void) drawRect: (CGRect) rect {
CGContextRef context = UIGraphicsGetCurrentContext();
CGRect bounds = self.bounds;
CGImageRef mask = [UIImage imageNamed: @"loupeMask"].CGImage;
UIImage *glass = [UIImage imageNamed: @"loupeImage"];
CGContextSaveGState(context);
CGContextClipToMask(context, bounds, mask);
CGContextFillRect(context, bounds);
CGContextScaleCTM(context, 2.0, 2.0);
//draw your subject view here
CGContextRestoreGState(context);
[glass drawInRect: bounds];
}
There is a complete example over here. There is a minor error in the downloaded project but otherwise it works great and does exactly what you need.
I use this code in Swift 3:
class MagnifyingGlassView: UIView {
var zoom: CGFloat = 2 {
didSet {
setNeedsDisplay()
}
}
weak var readView: UIView?
// MARK: - UIVIew
override init(frame: CGRect) {
super.init(frame: frame)
setupView()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
setupView()
}
override func draw(_ rect: CGRect) {
guard let readView = readView else { return }
let magnifiedBounds = magnifyBounds(of: readView, zoom: zoom)
readView.drawHierarchy(in: magnifiedBounds, afterScreenUpdates: false)
}
// MARK: - Private
private func setupView() {
isOpaque = false
backgroundColor = UIColor.clear
}
private func magnifyBounds(of view: UIView, zoom: CGFloat) -> CGRect {
let transform = CGAffineTransform(scaleX: zoom, y: zoom)
var bounds = view.bounds.applying(transform)
bounds.center = view.bounds.center
return view.convert(bounds, to: self)
}
}
extension CGRect {
var center: CGPoint {
get {
return CGPoint(x: origin.x + width / 2, y: origin.y + height / 2)
}
set {
origin.x = newValue.x - width / 2
origin.y = newValue.y - height / 2
}
}
}
You need to call setNeedsDisplay in scrollViewDidScroll: if your read view is a scroll view, as in the sketch below.
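A minimal sketch of that delegate forwarding (the magnifyingGlassView property name is assumed):
func scrollViewDidScroll(_ scrollView: UIScrollView) {
    // Redraw the loupe whenever the content beneath it moves
    magnifyingGlassView.setNeedsDisplay()
}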