Plot Array as Graph in ImageView - swift

How can I plot an array to an image view as a graph?
I've been testing this in a playground and it works, but how can I plot this in an image view in an actual project?
let sineArraySize = 64
let frequency1 = 4.0
let phase1 = 0.0
let amplitude1 = 2.0
let sineWave = (0..<sineArraySize).map {
    amplitude1 * sin(2.0 * M_PI / Double(sineArraySize) * Double($0) * frequency1 + phase1)
}

func plotArrayInPlayground<T>(arrayToPlot: Array<T>, title: String) {
    for currentValue in arrayToPlot {
        XCPCaptureValue(title, currentValue)
    }
}

plotArrayInPlayground(sineWave, "Sine wave 1")

One way you could do this:
// Plots an array of doubles into an NSImage, scaled to the provided width,
// with the x-axis drawn at half the height.
func plotArray(arr: [Double], width: Double, height: Double) -> NSImage {
    if arr.isEmpty { return NSImage() }
    let xAxisHeight = height / 2
    let increment = width / Double(arr.count)
    let image = NSImage(size: NSSize(width: width, height: height))
    image.lockFocus()
    // set background color
    NSColor.whiteColor().set()
    NSRectFill(NSRect(x: 0, y: 0, width: width, height: height))
    let path = NSBezierPath()
    // line width of plot
    path.lineWidth = 5
    path.moveToPoint(NSPoint(x: 0, y: arr[0] * increment + xAxisHeight))
    var i = increment
    // note: iterate over the parameter, not the global sineWave
    for value in dropFirst(arr) {
        path.lineToPoint(NSPoint(x: i, y: value * increment + xAxisHeight))
        i += increment
    }
    // set plot color
    NSColor.blueColor().set()
    path.stroke()
    image.unlockFocus()
    return image
}
var imageView = NSImageView()
imageView.image = plotArray(sineWave, 500, 200)
// have fun
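If the "actual project" targets iOS rather than macOS, the same idea works with UIImage/UIImageView. Here is a minimal sketch in modern Swift using UIGraphicsImageRenderer (my adaptation, not part of the original answer; note that UIKit's y-axis points down, so values are subtracted from the axis height):
import UIKit

// Sketch: plot an array of doubles into a UIImage, x-axis at half height.
func plotArray(_ arr: [Double], width: CGFloat, height: CGFloat) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: CGSize(width: width, height: height))
    return renderer.image { ctx in
        UIColor.white.setFill()
        ctx.fill(CGRect(x: 0, y: 0, width: width, height: height))
        guard let first = arr.first else { return }
        let xAxisHeight = height / 2
        let increment = width / CGFloat(arr.count)
        let path = UIBezierPath()
        path.lineWidth = 5
        path.move(to: CGPoint(x: 0, y: xAxisHeight - CGFloat(first) * increment))
        var x = increment
        for value in arr.dropFirst() {
            path.addLine(to: CGPoint(x: x, y: xAxisHeight - CGFloat(value) * increment))
            x += increment
        }
        UIColor.blue.setStroke()
        path.stroke()
    }
}

// let imageView = UIImageView()
// imageView.image = plotArray(sineWave, width: 500, height: 200)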

Related

SVGPointList length changes using svg.js

Hi, I am using the function createPoint to animate a polygon using GSAP. I am also using svg.js.
If I use vanilla JavaScript to get the points of the SVG with
var polygon = document.querySelector("polygon");
var points = polygon.points;
it returns 3 points, which corresponds to the number of times the createPoint function is run. This logs as:
0: SVGPoint {x: 105.30396270751953, y: 143.0928955078125}
1: SVGPoint {x: 348.09027099609375, y: 97.7249984741211}
2: SVGPoint {x: 276.54010009765625, y: 327.56372070…}
If I use the svg.js code
const draw = SVG().addTo('body')
var svg = draw.node;
const polygon = draw.polygon().node;
var points = polygon.points;
the same function logs a list of 4 SVGPoints, with the first point being {x: 0, y: 0}, even though I am only running the function 3 times. Where is the additional (index 0) SVG point coming from? Thanks in advance.
0: SVGPoint {x: 0, y: 0}
1: SVGPoint {x: 93.79865264892578, y: 124.19292449951172}
2: SVGPoint {x: 346.3572082519531, y: 97.5942153930664}
3: SVGPoint {x: 227.08517456054688, y: 269.97042846…}
Given the following HTML:
<svg>
  <polygon points="" />
</svg>
and the code below:
TweenLite.defaultEase = Sine.easeInOut;

const draw = SVG().addTo('body');
var svg = draw.node;
const polygon = draw.polygon().node;
var points = polygon.points;
console.log('points', points);

var offset = 75;
createPoint(100, 100);
createPoint(300, 100);
createPoint(300, 300);
// createPoint(100, 300);

function createPoint(x, y) {
  var minX = x - offset;
  var maxX = x + offset;
  var minY = y - offset;
  var maxY = y + offset;
  var point = points.appendItem(svg.createSVGPoint());
  point.x = x;
  point.y = y;
  moveX();
  moveY();

  function moveX() {
    TweenLite.to(point, random(2, 4), {
      x: random(minX, maxX),
      delay: random(0.5),
      onComplete: moveX
    });
  }

  function moveY() {
    TweenLite.to(point, random(2, 4), {
      y: random(minY, maxY),
      delay: random(0.5),
      onComplete: moveY
    });
  }
}

function random(min, max) {
  if (max == null) { max = min; min = 0; }
  if (min > max) { var tmp = min; min = max; max = tmp; }
  return min + (max - min) * Math.random();
}
Just found out that by adding an empty array to the polygon, i.e.
const polygon = draw.polygon([]).node
it removes the default {x: 0, y: 0} point. Don't ask me why :)

iOS Core Graphics - how to render UIImage to sample pixel color with 16 bit components, not 8 bit?

I have a UIImage which comes from ARKit. I want to be able to sample the color of this image with high precision. Searching for how to sample the color of a UIImage or CGImage turns up code like the below, which gives me four UInt8 components.
I tried altering the bits per component, but I don't know how to adjust the other parameters of the CGContext to make it render.
How do I specify that Core Graphics render a UIImage using 16-bit color components (or something other than UInt8)?
let result = renderer.image { imageRendererContext in
    let context = imageRendererContext.cgContext
    context.setStrokeColor(UIColor.clear.cgColor)
    let maskWidth = Int(mask.size.width)
    let maskHeight = Int(mask.size.height)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bytesPerPixel = 4
    let bytesPerRow = bytesPerPixel * maskWidth
    let bitsPerComponent = 8
    let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Big.rawValue
    guard let maskContext = CGContext(data: nil,
                                      width: maskWidth,
                                      height: maskHeight,
                                      bitsPerComponent: bitsPerComponent,
                                      bytesPerRow: bytesPerRow,
                                      space: colorSpace,
                                      bitmapInfo: bitmapInfo),
          let maskPointer = maskContext.data?.assumingMemoryBound(to: UInt8.self) else {
        return
    }
    maskContext.draw(maskCGImage, in: CGRect(x: 0, y: 0, width: maskWidth, height: maskHeight))
    for x in 0 ..< maskWidth {
        for y in 0 ..< maskHeight {
            let i = bytesPerRow * y + bytesPerPixel * x
            let a = CGFloat(maskPointer[i + 3]) / 255.0
            let r = (CGFloat(maskPointer[i]) / a) / 255.0
            let g = (CGFloat(maskPointer[i + 1]) / a) / 255.0
            let b = (CGFloat(maskPointer[i + 2]) / a) / 255.0
        }
    }
}
Found that, as described in the Quartz 2D (Core Graphics) programming guide's table of supported pixel formats, 16 bits per component and higher is only available on macOS, not iOS.
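For reference, these are the parameters that would change for a 16-bit context. This is a sketch only, reusing maskWidth/maskHeight from the code above; per the guide, the CGContext initializer can be expected to return nil for this format on iOS:
// Sketch: the 16-bit variant of the context above (macOS-only per the guide).
let bitsPerComponent = 16
let bytesPerPixel = 8   // 4 components × 2 bytes each
let bytesPerRow = bytesPerPixel * maskWidth
let bitmapInfo: UInt32 = CGImageAlphaInfo.premultipliedLast.rawValue |
    CGBitmapInfo.byteOrder16Little.rawValue
let maskContext16 = CGContext(data: nil,
                              width: maskWidth,
                              height: maskHeight,
                              bitsPerComponent: bitsPerComponent,
                              bytesPerRow: bytesPerRow,
                              space: CGColorSpaceCreateDeviceRGB(),
                              bitmapInfo: bitmapInfo)
// If creation succeeds (macOS), sample with UInt16 and divide by 65535.0:
// let p = maskContext16?.data?.assumingMemoryBound(to: UInt16.self)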

Setting SKLabelNode to centre of SKSpriteNode Swift

I'm trying to set an SKLabelNode's position to the center of an SKSpriteNode.
I've looked at other questions on this, but none of them work with Swift 3.
This is my code:
var letter1background = SKSpriteNode(imageNamed: "letterbackground")
var letter1text = SKLabelNode()

override func didMove(to view: SKView) {
    letter1background.position = CGPoint(x: self.size.width / 8, y: self.size.height / 7.5)
    letter1background.size = CGSize(width: self.size.width / 8, height: self.size.width / 8)
    letter1background.zPosition = -5
    self.addChild(letter1background)

    letter1text.text = "C"
    letter1text.fontName = "Verdana-Bold"
    letter1text.fontSize = 28
    letter1text.fontColor = UIColor.white
    letter1text.zPosition = 0
    letter1text.horizontalAlignmentMode = letter1background.center
    letter1text.verticalAlignmentMode = letter1background.center
    self.letter1background.addChild(letter1text)
}
This code looks like it would work but in Swift 3 I now get this error:
Value of type 'SKSpriteNode' has no member 'center'
I have also tried:
self.letter1text.position = CGPoint(x: letter1background.frame.width / 2 - (letter1text.frame.width / 2), y: letter1background.frame.height / 2 - (letter1text.frame.height / 2))
and
letter1text.position = CGPoint(x: letter1background.frame.midX, y: letter1background.frame.midY)
But I still get results where the label is not centred on the background.
Any ideas?
Your calculations of the label's position are wrong. You just have to add the label at position (0, 0) and it will be centered, because of SpriteKit's coordinate system rules: (0, 0) is at the center of the parent node. Setting verticalAlignmentMode to .center matters as well, since the default (.baseline) draws the text slightly above center.
override func didMove(to view: SKView) {
    let label = SKLabelNode(fontNamed: "Arial")
    label.text = "C"
    label.fontColor = .black
    label.verticalAlignmentMode = .center
    label.fontSize = 29.0
    label.zPosition = 1

    let background = SKSpriteNode(color: .green, size: CGSize(width: 50, height: 50))
    background.addChild(label)
    addChild(background)
}
And you will get the label centered on the background sprite.

Swift 3 and CGContextDrawImage

I want to translate this line to current Swift 3 syntax, but there seem to be some problems:
CGContextDrawImage(context, CGRect(x: 0.0, y: 0.0, width: image!.size.width, height: image!.size.height), image!.cgImage)
According to CoreGraphics.apinotes, CGContextDrawImage was converted to CGContext.draw:
Name: CGContextDrawImage
# replaced by draw(_ image: CGImage, in rect: CGRect, byTiling: Bool = false)
SwiftName: CGContext.__draw(self:in:image:)
SwiftPrivate: true
When I try to do:
CGContext.draw(context as! CGImage, in: CGRect(x:0.0, y:0.0, width: image!.size.width, height: image!.size.height), byTiling: false)
It seems there is some simple syntax issue that disturbs the compiler, but I cannot see it (in fact, I receive a typical ambiguous-reference error).
Can anyone help me with the new Swift 3 syntax?
You need to call it as if it's an instance method of CGContext:
context.draw(image!.cgImage!, in: CGRect(x: 0.0, y: 0.0, width: image!.size.width, height: image!.size.height))
Check the latest reference of CGContext.
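A slightly safer variant of the same call, avoiding the force-unwraps (a sketch, assuming image is the optional UIImage from the question):
// Sketch: same draw call with optional binding instead of force-unwrapping.
if let image = image, let cgImage = image.cgImage {
    context.draw(cgImage, in: CGRect(x: 0.0, y: 0.0,
                                     width: image.size.width,
                                     height: image.size.height))
}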
I have found another very good solution for this issue, which I am currently using. You just need to pass the image as an argument to this method after capturing it with UIImagePickerController. It works well for all versions of iOS, and for both portrait and landscape camera orientations. It checks the EXIF orientation property of the image via UIImageOrientation and, according to the orientation value, transforms and scales the image, so the returned image has the same orientation as your camera view.
Here I have capped the maximum resolution at 3000 so that image quality doesn't suffer, especially on Retina devices, but you can change the resolution to suit your requirements.
func scaleAndRotateImage(image: UIImage, MaxResolution iIntMaxResolution: Int) -> UIImage {
    let kMaxResolution = iIntMaxResolution
    let imgRef = image.cgImage!
    let width: CGFloat = CGFloat(imgRef.width)
    let height: CGFloat = CGFloat(imgRef.height)
    var transform = CGAffineTransform.identity
    var bounds = CGRect(x: 0, y: 0, width: width, height: height)
    if Int(width) > kMaxResolution || Int(height) > kMaxResolution {
        let ratio: CGFloat = width / height
        if ratio > 1 {
            bounds.size.width = CGFloat(kMaxResolution)
            bounds.size.height = bounds.size.width / ratio
        } else {
            bounds.size.height = CGFloat(kMaxResolution)
            bounds.size.width = bounds.size.height * ratio
        }
    }
    let scaleRatio: CGFloat = bounds.size.width / width
    let imageSize = CGSize(width: CGFloat(imgRef.width), height: CGFloat(imgRef.height))
    var boundHeight: CGFloat
    let orient = image.imageOrientation
    switch orient {
    case .up: // EXIF = 1
        transform = CGAffineTransform.identity
    case .upMirrored: // EXIF = 2
        transform = CGAffineTransform(translationX: imageSize.width, y: 0.0)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
    case .down: // EXIF = 3
        transform = CGAffineTransform(translationX: imageSize.width, y: imageSize.height)
        transform = transform.rotated(by: CGFloat(Double.pi))
    case .downMirrored: // EXIF = 4
        transform = CGAffineTransform(translationX: 0.0, y: imageSize.height)
        transform = transform.scaledBy(x: 1.0, y: -1.0)
    case .leftMirrored: // EXIF = 5
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(translationX: imageSize.height, y: imageSize.width)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
        transform = transform.rotated(by: CGFloat(3.0 * Double.pi / 2.0))
    case .left: // EXIF = 6
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(translationX: 0.0, y: imageSize.width)
        transform = transform.rotated(by: CGFloat(3.0 * Double.pi / 2.0))
    case .rightMirrored: // EXIF = 7
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(scaleX: -1.0, y: 1.0)
        transform = transform.rotated(by: CGFloat(Double.pi / 2.0))
    case .right: // EXIF = 8
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(translationX: imageSize.height, y: 0.0)
        transform = transform.rotated(by: CGFloat(Double.pi / 2.0))
    default:
        print("Error in processing image")
    }
    UIGraphicsBeginImageContext(bounds.size)
    let context = UIGraphicsGetCurrentContext()
    if orient == .right || orient == .left {
        context?.scaleBy(x: -scaleRatio, y: scaleRatio)
        context?.translateBy(x: -height, y: 0)
    } else {
        context?.scaleBy(x: scaleRatio, y: -scaleRatio)
        context?.translateBy(x: 0, y: -height)
    }
    context?.concatenate(transform)
    context?.draw(imgRef, in: CGRect(x: 0, y: 0, width: width, height: height))
    let imageCopy = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return imageCopy!
}
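A hypothetical call site (the names picked and imageView are illustrative, not from the answer), e.g. in the UIImagePickerController delegate:
// Illustrative only: normalize a freshly picked photo before displaying it.
func imagePickerController(_ picker: UIImagePickerController,
                           didFinishPickingMediaWithInfo info: [String : Any]) {
    if let picked = info[UIImagePickerControllerOriginalImage] as? UIImage {
        imageView.image = scaleAndRotateImage(image: picked, MaxResolution: 3000)
    }
    picker.dismiss(animated: true, completion: nil)
}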

Changing the Color Space of NSImage

Is there a way I can change the color space of an NSImage or NSBitmapImageRep/CGImage or the like? I am open to any way, preferably the way Photoshop does it.
The problem with the code you see below
CGImageRef CGImageCreateCopyWithColorSpace (
    CGImageRef image,
    CGColorSpaceRef colorspace
);
is that it does not work, because Quartz 2D does not support alpha for CMYK or grayscale images; alpha is supported only for RGB images.
What you should do is create two images and then combine them, in the case of a CMYK color space and an image with alpha. I have searched a lot about this topic and finally found a solution. Maybe the author of this question doesn't need it anymore, but someone else may.
Create a new NSBitmapImageRep:
let imageRep = NSBitmapImageRep(bitmapDataPlanes: nil,
                                pixelsWide: Int(round(imageSize.width)),
                                pixelsHigh: Int(round(imageSize.height)),
                                bitsPerSample: 8,
                                samplesPerPixel: 4,
                                hasAlpha: false,
                                isPlanar: false,
                                colorSpaceName: NSDeviceCMYKColorSpace,
                                bitmapFormat: NSBitmapFormat(rawValue: 0),
                                bytesPerRow: Int(round(imageSize.width) * CGFloat(4)),
                                bitsPerPixel: 0)
You need to draw your image into the new bitmap.
Let's say you have colorSpace: NSColorSpace:
let context = NSGraphicsContext(bitmapImageRep: imageRep)
NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.setCurrentContext(context)
imageRep.setProperty(NSImageColorSyncProfileData, withValue: colorSpace.ICCProfileData)
// Do your drawing here
NSGraphicsContext.restoreGraphicsState()
After this you will have an imageRep that is the image with the correct color space but no alpha (transparency).
Next you need a mask bitmap. Obtaining the mask is tricky:
let imageRep = NSBitmapImageRep(bitmapDataPlanes: nil,
                                pixelsWide: Int(round(imageSize.width)),
                                pixelsHigh: Int(round(imageSize.height)),
                                bitsPerSample: 8,
                                samplesPerPixel: 1,
                                hasAlpha: false,
                                isPlanar: false,
                                colorSpaceName: NSDeviceWhiteColorSpace,
                                bitmapFormat: NSBitmapFormat(rawValue: 0),
                                bytesPerRow: Int(round(imageSize.width)),
                                bitsPerPixel: 0)
Clip the drawing to your image's transparency (alpha):
if let graphicsPort = NSGraphicsContext.currentContext()?.graphicsPort {
    let context = unsafeBitCast(graphicsPort, CGContextRef.self)
    let imgRect = NSRect(origin: NSPoint.zero, size: image.extent.size)
    let ciContext = CIContext()
    let cgImage = ciContext.createCGImage(image, fromRect: image.extent)
    CGContextClipToMask(context, imgRect, cgImage)
}
Colorize all pixels with white; the fill will be clipped to the image:
let context = NSGraphicsContext(bitmapImageRep: imageRep)
NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.setCurrentContext(context)
imageRep.setProperty(NSImageColorSyncProfileData, withValue: colorSpace.ICCProfileData)
NSColor.whiteColor().setFill()
NSBezierPath.fillRect(NSRect(origin: NSPoint.zero, size: imageSize))
NSGraphicsContext.restoreGraphicsState()
At this step you have an image and a mask. Next we need to combine them. I am using this little algorithm (you need to verify that the mask and the source image have the same size and the same colorSpaceModel):
func createCMYKAImageRepByApplingAlphaMask(srcImageRep: NSBitmapImageRep, alphaMaskImageRep alphaMask: NSBitmapImageRep) -> NSBitmapImageRep? {
    if canApplyMaskRepOnImageRep(srcImageRep, maskRep: alphaMask) == false {
        return nil
    }
    let alphaData = alphaMask.bitmapData
    let srcData = srcImageRep.bitmapData
    if let imageWithAlphaMaskRep = createEmptyCMYKABitmapImageRep(alphaMask.size) {
        if let colorSpaceData = imageColorSpace?.ICCProfileData {
            imageWithAlphaMaskRep.setProperty(NSImageColorSyncProfileData, withValue: colorSpaceData)
        }
        fillPixelsWithComponentsData(imageWithAlphaMaskRep, components: { (pixelIdx: Int) -> (UInt8, UInt8, UInt8, UInt8, UInt8) in
            let cyan = srcData[pixelIdx * 4 + 0]
            let magenta = srcData[pixelIdx * 4 + 1]
            let yellow = srcData[pixelIdx * 4 + 2]
            let black = srcData[pixelIdx * 4 + 3]
            let alpha = alphaData[pixelIdx]
            return (cyan, magenta, yellow, black, alpha)
        })
        return imageWithAlphaMaskRep
    }
    return nil
}
// Helper referenced by createCMYKAImageRepByApplingAlphaMask above;
// creates a 5-sample (CMYK + alpha) rep of the given size.
func createEmptyCMYKABitmapImageRep(imageSize: NSSize) -> NSBitmapImageRep? {
    let imageRep = NSBitmapImageRep(bitmapDataPlanes: nil,
                                    pixelsWide: Int(round(imageSize.width)),
                                    pixelsHigh: Int(round(imageSize.height)),
                                    bitsPerSample: 8,
                                    samplesPerPixel: 5,
                                    hasAlpha: true,
                                    isPlanar: false,
                                    colorSpaceName: NSDeviceCMYKColorSpace,
                                    bitmapFormat: NSBitmapFormat(rawValue: 0),
                                    bytesPerRow: Int(round(imageSize.width) * CGFloat(5)),
                                    bitsPerPixel: 0)
    return imageRep
}
private func fillPixelsWithComponentsData(imgRep: NSBitmapImageRep,
                                          components: (Int) -> (cyan: UInt8, magenta: UInt8, yellow: UInt8, black: UInt8, alpha: UInt8)) {
    let imageRawData = imgRep.bitmapData
    let imageWidth = Int(imgRep.size.width)
    let imageHeight = Int(imgRep.size.height)
    for pixelIdx in 0 ..< (imageWidth * imageHeight) {
        let (cyan, magenta, yellow, black, alpha) = components(pixelIdx)
        let fAlpha = Float(alpha) / 255
        imageRawData[pixelIdx * 5 + 0] = UInt8(Float(cyan) * fAlpha)
        imageRawData[pixelIdx * 5 + 1] = UInt8(Float(magenta) * fAlpha)
        imageRawData[pixelIdx * 5 + 2] = UInt8(Float(yellow) * fAlpha)
        imageRawData[pixelIdx * 5 + 3] = UInt8(Float(black) * fAlpha)
        imageRawData[pixelIdx * 5 + 4] = alpha
    }
}
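The combine function also calls canApplyMaskRepOnImageRep, which isn't shown above. A minimal sketch of the check it describes (the exact conditions are my assumption): the mask must match the source in size, and the sample counts must match what the pixel loop expects:
// Sketch (not from the original answer): the source must be a 4-sample CMYK
// rep and the mask a 1-sample grayscale rep of the same size.
func canApplyMaskRepOnImageRep(srcImageRep: NSBitmapImageRep, maskRep: NSBitmapImageRep) -> Bool {
    return srcImageRep.size == maskRep.size &&
        srcImageRep.samplesPerPixel == 4 &&
        maskRep.samplesPerPixel == 1
}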
CGImageRef CGImageCreateCopyWithColorSpace (
CGImageRef image,
CGColorSpaceRef colorspace
);
To get a CGColorSpaceRef, you can do things like
CGColorSpaceRef CGColorSpaceCreateDeviceRGB();
or see the CGColorSpace reference for how to create other color spaces.
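In Swift, the same device color space constructors are available as global functions; for example:
// Swift equivalents of the device color space constructors above.
let rgb  = CGColorSpaceCreateDeviceRGB()
let cmyk = CGColorSpaceCreateDeviceCMYK()
let gray = CGColorSpaceCreateDeviceGray()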