Set contents of CALayer to animated GIF? - objective-c

Is it possible to set the contents of a CALayer to an animated GIF and have it display that animation? I know that I can set the contents to an image like so:
CALayer* subLayer = [CALayer layer];
NSImage *image = [[NSImage alloc] initWithData:data];
subLayer.contents = image;
And the image will show, but if it's animated, the animation will not display. Is the only solution to get the individual frames for the GIF, get the frame rate, then change the content of the sublayer according to the frame rate? Or is there a much simpler method that I'm overlooking?

Swift 3 version, but changed to receive a URL (for my own purposes).
func createGIFAnimation(url:URL) -> CAKeyframeAnimation?{
guard let src = CGImageSourceCreateWithURL(url as CFURL, nil) else { return nil }
let frameCount = CGImageSourceGetCount(src)
// Total loop time
var time : Float = 0
// Arrays
var framesArray = [AnyObject]()
var tempTimesArray = [NSNumber]()
// Loop
for i in 0..<frameCount {
// Frame default duration
var frameDuration : Float = 0.1;
let cfFrameProperties = CGImageSourceCopyPropertiesAtIndex(src, i, nil)
guard let frameProperties = cfFrameProperties as? [String:AnyObject] else { return nil }
guard let gifProperties = frameProperties[kCGImagePropertyGIFDictionary as String] as? [String:AnyObject]
else { return nil }
// Use kCGImagePropertyGIFUnclampedDelayTime or kCGImagePropertyGIFDelayTime
if let delayTimeUnclampedProp = gifProperties[kCGImagePropertyGIFUnclampedDelayTime as String] as? NSNumber {
frameDuration = delayTimeUnclampedProp.floatValue
}
else{
if let delayTimeProp = gifProperties[kCGImagePropertyGIFDelayTime as String] as? NSNumber {
frameDuration = delayTimeProp.floatValue
}
}
// Make sure its not too small
if frameDuration < 0.011 {
frameDuration = 0.100;
}
// Add frame to array of frames
if let frame = CGImageSourceCreateImageAtIndex(src, i, nil) {
tempTimesArray.append(NSNumber(value: frameDuration))
framesArray.append(frame)
}
// Compile total loop time
time = time + frameDuration
}
var timesArray = [NSNumber]()
var base : Float = 0
for duration in tempTimesArray {
timesArray.append(NSNumber(value: base))
base += duration.floatValue / time
}
// From documentation of 'CAKeyframeAnimation':
// the first value in the array must be 0.0 and the last value must be 1.0.
// The array should have one more entry than appears in the values array.
// For example, if there are two values, there should be three key times.
timesArray.append(NSNumber(value: 1.0))
// Create animation
let animation = CAKeyframeAnimation(keyPath: "contents")
animation.beginTime = AVCoreAnimationBeginTimeAtZero
animation.duration = CFTimeInterval(time)
animation.repeatCount = Float.greatestFiniteMagnitude;
animation.isRemovedOnCompletion = false
animation.fillMode = kCAFillModeForwards
animation.values = framesArray
animation.keyTimes = timesArray
//animation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionLinear)
animation.calculationMode = kCAAnimationDiscrete
return animation;
}
The important thing is written in the documentation of 'CAKeyframeAnimation':
the first value in the array must be 0.0 and the last value must be 1.0.
The array should have one more entry than appears in the values array.
For example, if there are two values, there should be three key times.
So I added this line:
timesArray.append(NSNumber(value: 1.0))
I also checked it when exporting as video with AVAssetExportSession.
Please call it like this (the returned animation still needs to be added to a layer with add(_:forKey:)):
if let url = Bundle.main.url(forResource: "animation", withExtension: "gif"){
let animation = createGIFAnimation(url: url)
}

Well, okay, I guess this isn't that hard to do. Basically I check to see if the image is a GIF:
if ([format isEqualToString:@"gif"]){
CAKeyframeAnimation *animation = [self createGIFAnimation:data];
[subLayer addAnimation:animation forKey:@"contents"];
}
And as you can see, I'm calling my custom createGIFAnimation method, which looks like this:
- (CAKeyframeAnimation *)createGIFAnimation:(NSData *)data{
NSBitmapImageRep *rep = [[NSBitmapImageRep alloc] initWithData:data];
CGImageSourceRef src = CGImageSourceCreateWithData((__bridge CFDataRef)(data), nil);
NSNumber *frameCount = [rep valueForProperty:@"NSImageFrameCount"];
// Total loop time
float time = 0;
// Arrays
NSMutableArray *framesArray = [NSMutableArray array];
NSMutableArray *tempTimesArray = [NSMutableArray array];
// Loop
for (int i = 0; i < frameCount.intValue; i++){
// Frame default duration
float frameDuration = 0.1f;
// Frame duration
CFDictionaryRef cfFrameProperties = CGImageSourceCopyPropertiesAtIndex(src,i,nil);
NSDictionary *frameProperties = (__bridge NSDictionary*)cfFrameProperties;
NSDictionary *gifProperties = frameProperties[(NSString*)kCGImagePropertyGIFDictionary];
// Use kCGImagePropertyGIFUnclampedDelayTime or kCGImagePropertyGIFDelayTime
NSNumber *delayTimeUnclampedProp = gifProperties[(NSString*)kCGImagePropertyGIFUnclampedDelayTime];
if(delayTimeUnclampedProp) {
frameDuration = [delayTimeUnclampedProp floatValue];
} else {
NSNumber *delayTimeProp = gifProperties[(NSString*)kCGImagePropertyGIFDelayTime];
if(delayTimeProp) {
frameDuration = [delayTimeProp floatValue];
}
}
// Make sure its not too small
if (frameDuration < 0.011f){
frameDuration = 0.100f;
}
[tempTimesArray addObject:[NSNumber numberWithFloat:frameDuration]];
// Release
CFRelease(cfFrameProperties);
// Add frame to array of frames
CGImageRef frame = CGImageSourceCreateImageAtIndex(src, i, nil);
[framesArray addObject:(__bridge id)(frame)];
// Compile total loop time
time = time + frameDuration;
}
NSMutableArray *timesArray = [NSMutableArray array];
float base = 0;
for (NSNumber* duration in tempTimesArray){
//duration = [NSNumber numberWithFloat:(duration.floatValue/time) + base];
base = base + (duration.floatValue/time);
[timesArray addObject:[NSNumber numberWithFloat:base]];
}
// Create animation
CAKeyframeAnimation* animation = [CAKeyframeAnimation animationWithKeyPath:@"contents"];
animation.duration = time;
animation.repeatCount = HUGE_VALF;
animation.removedOnCompletion = NO;
animation.fillMode = kCAFillModeForwards;
animation.values = framesArray;
animation.keyTimes = timesArray;
animation.timingFunction = [CAMediaTimingFunction functionWithName:kCAMediaTimingFunctionLinear];
animation.calculationMode = kCAAnimationDiscrete;
return animation;
}
Pretty straightforward, although I would love some tips on how to improve it.
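One tip for improving it, offered as a hedged suggestion: CGImageSourceCreateImageAtIndex and CGImageSourceCreateWithData both follow the Core Foundation Create rule, so the frames and the image source above are never released. Under ARC, the frame-adding lines could be balanced like this, with a CFRelease(src) added just before the method returns:
// Add frame to array of frames, then balance the Create call
CGImageRef frame = CGImageSourceCreateImageAtIndex(src, i, nil);
if (frame) {
    [framesArray addObject:(__bridge id)frame]; // the array retains the image
    CGImageRelease(frame);                      // release our +1 reference
}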

- (CAKeyframeAnimation *)createGIFAnimation:(NSData *)data{
CGImageSourceRef src = CGImageSourceCreateWithData((__bridge CFDataRef)(data), nil);
int frameCount =(int) CGImageSourceGetCount(src);
// Total loop time
float time = 0;
// Arrays
NSMutableArray *framesArray = [NSMutableArray array];
NSMutableArray *tempTimesArray = [NSMutableArray array];
// Loop
for (int i = 0; i < frameCount; i++){
// Frame default duration
float frameDuration = 0.1f;
// Frame duration
CFDictionaryRef cfFrameProperties = CGImageSourceCopyPropertiesAtIndex(src,i,nil);
NSDictionary *frameProperties = (__bridge NSDictionary*)cfFrameProperties;
NSDictionary *gifProperties = frameProperties[(NSString*)kCGImagePropertyGIFDictionary];
// Use kCGImagePropertyGIFUnclampedDelayTime or kCGImagePropertyGIFDelayTime
NSNumber *delayTimeUnclampedProp = gifProperties[(NSString*)kCGImagePropertyGIFUnclampedDelayTime];
if(delayTimeUnclampedProp) {
frameDuration = [delayTimeUnclampedProp floatValue];
} else {
NSNumber *delayTimeProp = gifProperties[(NSString*)kCGImagePropertyGIFDelayTime];
if(delayTimeProp) {
frameDuration = [delayTimeProp floatValue];
}
}
// Make sure its not too small
if (frameDuration < 0.011f){
frameDuration = 0.100f;
}
[tempTimesArray addObject:[NSNumber numberWithFloat:frameDuration]];
// Release
CFRelease(cfFrameProperties);
// Add frame to array of frames
CGImageRef frame = CGImageSourceCreateImageAtIndex(src, i, nil);
[framesArray addObject:(__bridge id)(frame)];
// Compile total loop time
time = time + frameDuration;
}
NSMutableArray *timesArray = [NSMutableArray array];
float base = 0;
for (NSNumber* duration in tempTimesArray){
//duration = [NSNumber numberWithFloat:(duration.floatValue/time) + base];
base = base + (duration.floatValue/time);
[timesArray addObject:[NSNumber numberWithFloat:base]];
}
// Create animation
CAKeyframeAnimation* animation = [CAKeyframeAnimation animationWithKeyPath:@"contents"];
animation.duration = time;
animation.repeatCount = HUGE_VALF;
animation.removedOnCompletion = NO;
animation.fillMode = kCAFillModeForwards;
animation.values = framesArray;
animation.keyTimes = timesArray;
animation.timingFunction = [CAMediaTimingFunction functionWithName:kCAMediaTimingFunctionLinear];
animation.calculationMode = kCAAnimationDiscrete;
return animation;
}
According to mahal's answer, this is how it should be done.

Related

copyNextSampleBuffer returns NULL when reading two-video-track movie second time

I have a movie with two video tracks. I need to read frames from the two tracks interleaved - that is, one frame from track 1, then one frame from track 2, then from track 1 and so on.
I'm using an AVAsset with two AVAssetReaders to read the frames and it works; however, if I deallocate everything and try a second time, copyNextSampleBuffer returns null on the first frame, and the error is -11829 (Cannot Open). This doesn't happen if I try a single-track movie, or if I try reading just a single track from the same movie.
This is my stripped-down code:
NSURL *url = /* url to movie */;
NSError *localError = nil;
AVAssetReader *assetReader[2];
AVAssetReaderOutput *assetReaderOutput[2];
NSDictionary *assetOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
NSDictionary *decompressionVideoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA], (id)kCVPixelBufferPixelFormatTypeKey,
[NSDictionary dictionary], (id)kCVPixelBufferIOSurfacePropertiesKey,
nil];
for (int loop = 0; loop < 2; ++loop)
{
AVAsset *asset = [AVURLAsset URLAssetWithURL:url options:assetOptions];
float durationInSeconds = CMTimeGetSeconds(asset.duration);
NSArray *videoTracks = [asset tracksWithMediaType:AVMediaTypeVideo];
NSUInteger numTracks = [videoTracks count];
float framesPerSecond = [videoTracks[0] nominalFrameRate];
int numFrames = (int)(durationInSeconds * framesPerSecond);
for (NSUInteger trackNum = 0; trackNum < numTracks; ++trackNum)
{
assetReader[trackNum] = [[AVAssetReader alloc] initWithAsset:asset error:&localError];
assetReaderOutput[trackNum] = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:videoTracks[trackNum] outputSettings:decompressionVideoSettings];
[assetReader[trackNum] addOutput:assetReaderOutput[trackNum]];
[assetReader[trackNum] startReading];
}
for (int frameNum = 0; frameNum < numFrames; ++frameNum)
{
for (int trackNum = 0; trackNum < numTracks; ++trackNum)
{
CMSampleBufferRef sampleBuffer = [assetReaderOutput[trackNum] copyNextSampleBuffer];
if (sampleBuffer == NULL)
{
NSError *error = [assetReader[trackNum] error];
/* Handle error */
}
else
{
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
if (imageBuffer && (CFGetTypeID(imageBuffer) == CVPixelBufferGetTypeID()))
{
CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)imageBuffer;
CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
void* ptr = CVPixelBufferGetBaseAddress(pixelBuffer);
CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
/* Do something with ptr */
}
CFRelease(sampleBuffer);
sampleBuffer = NULL;
}
}
}
[asset release];
asset = nil;
for (int trackNum = 0; trackNum < numTracks; ++trackNum)
{
[assetReader[trackNum] release];
assetReader[trackNum] = nil;
[assetReaderOutput[trackNum] release];
assetReaderOutput[trackNum] = nil;
}
int userInput;
scanf("%i", &userInput);
}
So on the second iteration of the outer loop, I get to the Handle error code on the first frame of the first track. Just to be clear, this does not happen if, for example, I replace the line
NSUInteger numTracks = [videoTracks count];
with
NSUInteger numTracks = 1;
Any ideas?
This might be connected to this (yet unanswered) question.

How do you animate different Core Animation layers simultaneously?

I'm currently trying to implement an animation using Core Animation on iOS. Right now I can apply multiple animations to a single layer using CAAnimationGroup, but if I want to repeat that across various different layers --- simultaneously --- things get hazy.
Currently it looks like CAAnimationGroup is a singleton class (or at least, I only seem to get one instance when I allocate using [CAAnimationGroup animation]).
Any ideas?
NSMutableDictionary* elementInstanceData = [self.elementInstanceValues objectForKey:elementInstance];
NSArray* animationValues = [elementInstanceData objectForKey:@"animationValues"];
NSArray* keyTimeValues = [elementInstanceData objectForKey:@"keyTimeValues"];
NSNumber* elementDurationTime = elementInstanceData[@"duration"];
if(animationValues != nil)
{
NSMutableArray* transValues = [[NSMutableArray alloc]init];
NSMutableArray* scaleValues = [[NSMutableArray alloc]init];
NSMutableArray* rotateValues = [[NSMutableArray alloc]init];
for(NSDictionary* transforms in animationValues)
{
NSValue* trans = transforms[@"translate"];
[transValues addObject:trans];
NSValue* scale = transforms[@"scale"];
[scaleValues addObject:scale];
NSValue* rotate = transforms[@"rotate"];
[rotateValues addObject:rotate];
}
NSMutableArray* ratioTimeValues = [[NSMutableArray alloc]init];
for(NSNumber* time in keyTimeValues)
{
float percentage = ([time floatValue] / [elementDurationTime floatValue]);
[ratioTimeValues addObject:[NSNumber numberWithFloat:percentage ]];
}
NSCAssert([ratioTimeValues count] == [transValues count], @"Keytime/value mismatch (times:%lu, values: %lu)", (unsigned long)[ratioTimeValues count], (unsigned long)[transValues count]);
CAKeyframeAnimation* transAnim = [CAKeyframeAnimation animationWithKeyPath:@"position"];
[transAnim setValues:transValues];
[transAnim setKeyTimes:ratioTimeValues];
transAnim.beginTime = CACurrentMediaTime() + startTime;
transAnim.duration = durationTime;
[allAnimations addObject:transAnim];
CAKeyframeAnimation* scaleAnim = [CAKeyframeAnimation animationWithKeyPath:@"transform"];
[scaleAnim setValues:scaleValues];
[scaleAnim setKeyTimes:ratioTimeValues];
scaleAnim.beginTime = CACurrentMediaTime() + startTime;
scaleAnim.duration = durationTime;
[allAnimations addObject:scaleAnim];
CAKeyframeAnimation* rotAnim = [CAKeyframeAnimation animationWithKeyPath:@"transform.rotation.z"];
[rotAnim setValues:rotateValues];
[rotAnim setKeyTimes:ratioTimeValues];
rotAnim.beginTime = CACurrentMediaTime() + startTime;
rotAnim.duration = durationTime;
[allAnimations addObject:rotAnim];
}
CAAnimationGroup* elementInstanceAnimationGroup = [CAAnimationGroup animation];
elementInstanceAnimationGroup.animations = allAnimations;
elementInstanceAnimationGroup.beginTime = CACurrentMediaTime();
elementInstanceAnimationGroup.duration = [elementDurationTime floatValue];
elementInstanceAnimationGroup.delegate = self;
[elementInstanceData setObject:elementInstanceAnimationGroup forKey:@"animationGroup"];
UIView* targetView = [elementInstanceData objectForKey:#"target"];
[targetView.layer addAnimation:elementInstanceAnimationGroup forKey:nil];
I can apply multiple animations to a single layer using CAAnimationGroup, but if I want to repeat that across various different layers --- simultaneously --- things get hazy.
A single animation group can animate properties of different named sublayers.
For example, suppose I have a layer with two sublayers. The name of the first sublayer is "manny" and the name of the second sublayer is "moe". And let's say I want to animate the position of the two sublayers individually.
Then I use a keyPath of "sublayers.manny.position" for one animation, and a keyPath of "sublayers.moe.position", group them, and attach the group to the superlayer.
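A minimal sketch of that approach (assuming superlayer is an existing CALayer, and the point values are placeholders):
CALayer *manny = [CALayer layer];
manny.name = @"manny";
CALayer *moe = [CALayer layer];
moe.name = @"moe";
[superlayer addSublayer:manny];
[superlayer addSublayer:moe];
// Each animation addresses a sublayer by name through its key path
CABasicAnimation *mannyMove = [CABasicAnimation animationWithKeyPath:@"sublayers.manny.position"];
mannyMove.toValue = [NSValue valueWithCGPoint:CGPointMake(100, 100)];
CABasicAnimation *moeMove = [CABasicAnimation animationWithKeyPath:@"sublayers.moe.position"];
moeMove.toValue = [NSValue valueWithCGPoint:CGPointMake(200, 50)];
// Group them and attach the group to the superlayer
CAAnimationGroup *group = [CAAnimationGroup animation];
group.animations = @[mannyMove, moeMove];
group.duration = 1.0;
[superlayer addAnimation:group forKey:@"moveBoth"];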
You can create an animation or animation group and add it to multiple layers.
CABasicAnimation* fadeIn = [CABasicAnimation animationWithKeyPath:@"opacity"];
fadeIn.duration = 0.35;
fadeIn.fromValue = [NSNumber numberWithDouble:0];
fadeIn.toValue = [NSNumber numberWithDouble:1];
for (int i = 0; i < self.indicatorBars.count; i++) {
[self.indicatorBars[i] addAnimation:fadeIn forKey:@"opacity"];
}

Insert line break using SKLabelNode in SpriteKit

Simple question on how to insert a line break using SKLabelNode class in SpriteKit. I have the following code but it does not work -
SKLabelNode *nerdText = [SKLabelNode labelNodeWithFontNamed:@"Times"];
NSString *st1 = @"Test break";
NSString *st2 = @"I want it to break";
NSString *test = [NSString stringWithFormat:@"%@,\r%@",st1,st2]; //Even tried \n
nerdText.text = test;
nerdText.fontSize = 11;
nerdText.fontColor = [SKColor colorWithRed:0.15 green:0.15 blue:0.3 alpha:1.0];
nerdText.position = CGPointMake(150.0, 250.0);
[self addChild:nerdText];
Please help me out!
I don't think you can; here is a "hack" way to do it:
SKNode *nerdText = [SKNode node];
SKLabelNode *a = [SKLabelNode labelNodeWithFontNamed:@"Arial"];
a.fontSize = 16;
a.fontColor = [SKColor yellowColor];
SKLabelNode *b = [SKLabelNode labelNodeWithFontNamed:@"Arial"];
b.fontSize = 16;
b.fontColor = [SKColor yellowColor];
NSString *st1 = @"Line 1";
NSString *st2 = @"Line 2";
b.position = CGPointMake(b.position.x, b.position.y - 20);
a.text = st1;
b.text = st2;
[nerdText addChild:a];
[nerdText addChild:b];
nerdText.position = CGPointMake(150.0, 250.0);
[self addChild:nerdText];
I had the same problem. I created a drop-in replacement for SKLabelNode called DSMultilineLabelNode that supports word wrap, line breaks, etc. The underlying implementation draws the string into a graphics context and then applies that to a texture on an SKSpriteNode.
It's available on GitHub at:
https://github.com/downrightsimple/DSMultilineLabelNode
static func multipleLineText(labelInPut: SKLabelNode) -> SKLabelNode {
let subStrings:[String] = labelInPut.text!.componentsSeparatedByString("\n")
var labelOutPut = SKLabelNode()
var subStringNumber:Int = 0
for subString in subStrings {
let labelTemp = SKLabelNode(fontNamed: labelInPut.fontName)
labelTemp.text = subString
labelTemp.fontColor = labelInPut.fontColor
labelTemp.fontSize = labelInPut.fontSize
labelTemp.position = labelInPut.position
labelTemp.horizontalAlignmentMode = labelInPut.horizontalAlignmentMode
labelTemp.verticalAlignmentMode = labelInPut.verticalAlignmentMode
let y:CGFloat = CGFloat(subStringNumber) * labelInPut.fontSize
print("y is \(y)")
if subStringNumber == 0 {
labelOutPut = labelTemp
subStringNumber++
} else {
labelTemp.position = CGPoint(x: 0, y: -y)
labelOutPut.addChild(labelTemp)
subStringNumber++
}
}
return labelOutPut
}
As of iOS 11/ macOS 10.13, SKLabelNode has a numberOfLines property that behaves in a similar way to the one that UILabel has. By default it's set to 1. If you set it to zero, you can have an unlimited number of lines. See also lineBreakMode and preferredMaxLayoutWidth. I thought it was worth pointing this out here in case anyone arrives at this page before they see the Apple documentation. If your minimum build target is iOS 11/ macOS 10.13, you don't need the helper methods posted above.
Here is another five minute hack by yours truly. It's not too bad.
+(SKSpriteNode*)spritenodecontaininglabelsFromStringcontainingnewlines:(NSString*)text fontname:(NSString*)fontname fontcolor:(NSColor*)colorFont fontsize:(const CGFloat)SIZEFONT verticalMargin:(const CGFloat)VERTICALMARGIN emptylineheight:(const CGFloat)EMPTYLINEHEIGHT {
NSArray* strings = [text componentsSeparatedByString:@"\n"];
//DLog(@"string count: %lu", (unsigned long)strings.count);
NSColor* color = NSColor.clearColor;
#ifdef DEBUG
color = [NSColor colorWithCalibratedRed:1 green:0 blue:0 alpha:0.5];
#endif
SKSpriteNode* spritenode = [SKSpriteNode spriteNodeWithColor:color size:CGSizeMake(0, 0)];
CGFloat totalheight = 0;
CGFloat maxwidth = 0;
NSMutableArray* labels = [NSMutableArray array];
for (NSUInteger i = 0; i < strings.count; i++) {
NSString* str = [strings objectAtIndex:i];
const BOOL ISEMPTYLINE = [str isEqualToString:@""];
if (!ISEMPTYLINE) {
SKLabelNode* label = [SKLabelNode labelNodeWithFontNamed:fontname];
label.text = str;
label.fontColor = colorFont;
label.fontSize = SIZEFONT;
const CGSize SIZEOFLABEL = [label calculateAccumulatedFrame].size;
if (SIZEOFLABEL.width > maxwidth)
maxwidth = SIZEOFLABEL.width;
totalheight += SIZEOFLABEL.height;
[labels addObject:label];
}
else {
totalheight += EMPTYLINEHEIGHT;
[labels addObject:[NSNull null]];
}
if (i + 1 < strings.count)
totalheight += VERTICALMARGIN;
}
spritenode.size = CGSizeMake(maxwidth, totalheight);
//DLog(#"spritenode total size: %#", NSStringFromSize(spritenode.size));
CGFloat y = spritenode.size.height * 0.5;
const CGFloat X = 0;
for (NSUInteger i = 0; i < strings.count; i++) {
id obj = [labels objectAtIndex:i];
if ([obj isKindOfClass:SKLabelNode.class]) {
SKLabelNode* label = obj;
label.verticalAlignmentMode = SKLabelVerticalAlignmentModeTop;
label.position = ccp(X, y);
[spritenode addChild:label];
const CGSize SIZEOFLABEL = [label calculateAccumulatedFrame].size;
y -= SIZEOFLABEL.height;
}
else {
y -= EMPTYLINEHEIGHT;
}
if (i + 1 < labels.count)
y -= VERTICALMARGIN;
}
return spritenode;
}
Btw you will need
static inline CGPoint ccp( CGFloat x, CGFloat y )
{
return CGPointMake(x, y);
}
So after doing a bit of research I learned that SKLabelNode was not intended to handle multiline strings. Since functionality is limited with SKLabelNode, it makes more sense to simply use a UILabel to hold the place of your text. Learning how to smoothly implement UI elements into SpriteKit has made life a whole lot easier. UI elements are created programmatically and added to your scene by using
[self.view addSubview:(your UI element)];
So all you have to do is:
1. Initialize an instance of the UI element, in this case a UILabel:
UILabel *label = [[UILabel alloc] initWithFrame:CGRectMake(50, 50, 100, 100)];
label.backgroundColor = [UIColor whiteColor];
label.textColor = [UIColor blackColor];
label.text = @"helllllllllo";
2. After you have created your UI element, just add it to the view using the method described above.
3. I have found it important to note that UI elements and SpriteKit elements do not position the same way. There are some simple methods provided, such as convertPointToView:, declared as
- (CGPoint)convertPointToView:(CGPoint)point;
to help when it comes to converting points. I hope that helped. Good luck!
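For example, a rough sketch from inside an SKScene subclass (label here is assumed to be the UILabel created above):
// Convert a point given in scene coordinates into the hosting view's
// coordinate space (SpriteKit's y-axis points up, UIKit's points down).
CGPoint scenePoint = CGPointMake(150.0, 250.0);
CGPoint viewPoint = [self convertPointToView:scenePoint];
label.center = viewPoint;
[self.view addSubview:label];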
I wrote a solution for Swift 3.
An Xcode demo project is available on the open source GitHub project: https://github.com/benmorrow/Multilined-SKLabelNode
Here's the SKLabelNode extension:
extension SKLabelNode {
func multilined() -> SKLabelNode {
let substrings: [String] = self.text!.components(separatedBy: "\n")
return substrings.enumerated().reduce(SKLabelNode()) {
let label = SKLabelNode(fontNamed: self.fontName)
label.text = $1.element
label.fontColor = self.fontColor
label.fontSize = self.fontSize
label.position = self.position
label.horizontalAlignmentMode = self.horizontalAlignmentMode
label.verticalAlignmentMode = self.verticalAlignmentMode
let y = CGFloat($1.offset - substrings.count / 2) * self.fontSize
label.position = CGPoint(x: 0, y: -y)
$0.addChild(label)
return $0
}
}
}
Here's how you use it:
let text = "hot dogs\ncold beer\nteam jerseys"
let singleLineMessage = SKLabelNode()
singleLineMessage.fontSize = min(size.width, size.height) /
CGFloat(text.components(separatedBy: "\n").count) // Fill the screen
singleLineMessage.verticalAlignmentMode = .center // Keep the origin in the center
singleLineMessage.text = text
let message = singleLineMessage.multilined()
message.position = CGPoint(x: frame.midX, y: frame.midY)
message.zPosition = 1001 // On top of all other nodes
addChild(message)
The alternative is to create a bitmap version of the text, then use the resulting image with a SKSpriteNode.
It's easier than it sounds.
An example, assume we have a string or an attributed string and a CGSize variable with the size of the resulting text area.
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
// Assuming size is in actual pixels. Multiply size by the retina scaling
// factor if not.
CGContextRef context = CGBitmapContextCreate(NULL, (size_t)round(size.width), (size_t)round(size.height), 8, (size_t)round(size.width) * 4, rgbColorSpace, (CGBitmapInfo)kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(rgbColorSpace);
// Draw text, potentially flipping the coordinate system before
// (depending on methods you use).
// Make sure that you draw the font twice as big for retina.
// E.g. [@"My text" drawInRect:rect withAttributes:attr];
// Once we have drawn the text, simply extract the image and
// Make a texture from it.
CGImageRef image = CGBitmapContextCreateImage(context);
SKTexture *texture = [SKTexture textureWithCGImage:image];
CGImageRelease(image);
CGContextRelease(context);
// Texture created, so make a sprite node to use it.
SKSpriteNode *node = [self node];
node.texture = texture;
// Set the node size to the size in non-retina pixels, so if size was with
// scale factor already multiplied in, then we would need to divide by the scale
// factor.
node.size = size;
Here, just to contribute my solution: I found myself wanting the same thing, to make multiple lines of SKLabelNodes from one long string. Creating them one by one and positioning them manually is not practical, so I made an easier way to make a multiline SKLabelNode. This method uses SKLabelNodes (and does not capture the text into an image).
Please see my solution if you are interested:
http://xcodenoobies.blogspot.com/2014/12/multiline-sklabelnode-hell-yes-please-xd.html
Lots of nice solutions here, but I didn't see any written in Swift, so here we go. This function will take in one long string and break it up wherever you place \n characters.
func createMultiLineText(textToPrint:String, color:UIColor, fontSize:CGFloat, fontName:String, fontPosition:CGPoint, fontLineSpace:CGFloat)->SKNode{
// create node to hold the text block
var textBlock = SKNode()
//create array to hold each line
let textArr = textToPrint.componentsSeparatedByString("\n")
// loop through each line and place it in an SKNode
var lineNode: SKLabelNode
for line: String in textArr {
lineNode = SKLabelNode(fontNamed: fontName)
lineNode.text = line
lineNode.fontSize = fontSize
lineNode.fontColor = color
lineNode.fontName = fontName
lineNode.position = CGPointMake(fontPosition.x,fontPosition.y - CGFloat(textBlock.children.count ) * fontSize + fontLineSpace)
textBlock.addChild(lineNode)
}
// return the sknode with all of the text in it
return textBlock
}
So I know this question is a little older, but just in case anyone comes back to it like I have: there's now a preferredMaxLayoutWidth property that you can use in conjunction with lineBreakMode and numberOfLines:
Example:
let longMessage = "Super super super super super super super super super long text"
let label = SKLabelNode(fontNamed: "Thonburi")
label.text = longMessage
label.fontSize = 24
label.fontColor = SKColor.black
// set preferredMaxLayoutWidth to the width of the SKScene
label.preferredMaxLayoutWidth = size.width
label.lineBreakMode = .byWordWrapping
label.numberOfLines = 0
addChild(label)
Like several others I have implemented a solution to this problem myself. It's a simple SKLabelNode subclass which can be used as a replacement for the regular SKLabelNode. I find subclassing the best approach for this functionality as I use it "everywhere" "all" the time...
The whole thing is available at GitHub (for anyone interested), but the main gist is as follows: it separates the string, creates regular SKLabelNode instances and adds these as children of the node. This is done whenever setText: is invoked:
- (void)setText:(NSString *)text{
self.subNodes = [self labelNodesFromText:text];
[self removeAllChildren];
for (SKLabelNode *childNode in self.subNodes) {
[self addChild:childNode];
}
_text = @""; // (synthesized in the implementation)
}
The label subnodes are created here:
- (NSArray *)labelNodesFromText:(NSString *)text{
NSArray *substrings = [text componentsSeparatedByString:@"\n"];
NSMutableArray *labelNodes = [[NSMutableArray alloc] initWithCapacity:[substrings count]];
NSUInteger labelNumber = 0;
for (NSString *substring in substrings) {
SKLabelNode *labelNode = [SKLabelNode labelNodeWithFontNamed:self.fontName];
labelNode.text = substring;
labelNode.fontColor = self.fontColor;
labelNode.fontSize = self.fontSize;
labelNode.horizontalAlignmentMode = self.horizontalAlignmentMode;
labelNode.verticalAlignmentMode = self.verticalAlignmentMode;
CGFloat y = self.position.y - (labelNumber * self.fontSize * kLineSpaceMultiplier); // kLineSpaceMultiplier is a float constant. 1.5 is the value I have chosen
labelNode.position = CGPointMake(self.position.x, y);
labelNumber++;
[labelNodes addObject:labelNode];
}
return [labelNodes copy];
}
As you might have noticed, I also have a subNodes property (an array). This comes in handy elsewhere, as the full implementation also allows for changing any of the properties with the regular SKLabelNode syntax (text, fontName, fontSize, alignment etc.).
If anyone is interested, I've created a better SKLabelNode called SKLabelNodePlus that has multi-line support like Chris Allwein's but also has other features I find pretty useful.
Check it out on GitHub:
https://github.com/MKargin0/SKLabelNodePlus
Using https://github.com/downrightsimple/DSMultilineLabelNode and How to write text on image in Objective-C (iOS)? for reference this is what I did for a quick and dirty way to get a text-wrapping SKNode (Xcode 7.1.1):
-(SKNode*)getWrappingTextNode:(NSString*)text maxWidth:(CGFloat)width {
UIImage *img = [self drawText:text widthDimension:width];
return [SKSpriteNode spriteNodeWithTexture:[SKTexture textureWithImage:img]];
}
-(UIImage*)drawText:(NSString*)text widthDimension:(CGFloat)width {
NSMutableParagraphStyle *paragraphStyle = [[NSParagraphStyle defaultParagraphStyle] mutableCopy];
paragraphStyle.lineBreakMode = NSLineBreakByWordWrapping;
paragraphStyle.alignment = NSTextAlignmentLeft; //or whatever alignment you want
UIFont *font = [UIFont fontWithName:@"Verdana" size:22]; //or whatever font you want
NSDictionary *att = @{NSFontAttributeName:font, NSParagraphStyleAttributeName: paragraphStyle};
//using 800 here but make sure this height is greater than the potential height of the text (unless you want a max-height I guess but I did not test max-height)
CGRect rect = [text boundingRectWithSize:CGSizeMake(width, 800) options:NSStringDrawingUsesLineFragmentOrigin attributes:att context:nil];
UIGraphicsBeginImageContextWithOptions(rect.size, NO, 0.0f);
[text drawInRect:rect withAttributes:att];
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
Here is a quick and easy function I wrote to just make life easier.
Step 1) Pass in a string, get a SKSpriteNode.
Step 2) Add sprite node to scene.
/******************************************************************************/
- (SKSpriteNode*) ConvertString: (NSString*) str
WithFontSize: (NSInteger) font_size
ToParagraphWithSize: (CGSize) para_size
{
SKSpriteNode* paragraph = [[SKSpriteNode alloc] initWithColor: [SKColor clearColor]
size: para_size];
// Set the anchor point to the top left corner. This is where English
// paragraphs usually start
paragraph.anchorPoint = CGPointMake(0,1);
// Create an array to hold multilple sub strings. These sub strings will
// become multiple SKLabels that will be added to the paragraph sprite node
// created above
NSMutableArray* str_arr = [[NSMutableArray alloc] init];
// Lets separate words by a single space.
NSArray* word_arr = [str componentsSeparatedByString:@" "];
// 50% is an approximate character height to width ratio. Change this
// number to adjust the number of characters per line you would like.
// Increase it if you have a lot of capitol W's
float est_char_width = font_size * 0.50;
NSInteger num_char_per_line = para_size.width / est_char_width;
// For every word in the original string, make sure it fits on the line
// then add it to the string array.
NSString* temp_str = @"";
for (NSString* word in word_arr)
{
if ((NSInteger)word.length <= num_char_per_line - (NSInteger)temp_str.length)
{
temp_str = [NSString stringWithFormat:@"%@ %@", temp_str, word];
}
else
{
[str_arr addObject: temp_str];
temp_str = word;
}
}
[str_arr addObject: temp_str];
// For every sub string, create a label node and add it to the paragraph
for (int i = 0; i < str_arr.count; i++)
{
NSString* sub_str = [str_arr objectAtIndex: i];
SKLabelNode* label = [self CreateLabelWithText: sub_str];
label.fontSize = 14;
label.position = CGPointMake(0, -(i+1) * font_size);
[paragraph addChild: label];
}
return paragraph;
}
/******************************************************************************/
- (SKLabelNode*) CreateLabelWithText: (NSString*) str
{
enum alignment
{
CENTER,
LEFT,
RIGHT
};
SKLabelNode* label;
label = [SKLabelNode labelNodeWithFontNamed:@"ChalkboardSE-Light"];
label.name = @"label_name";
label.text = str;
label.zPosition = 1;
label.horizontalAlignmentMode = LEFT;
label.fontColor = [SKColor whiteColor];
return label;
}
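A minimal usage sketch for the two methods above, assuming they live in your SKScene subclass:
SKSpriteNode *paragraph = [self ConvertString:@"Some long text that should wrap onto several lines of labels."
                                 WithFontSize:14
                          ToParagraphWithSize:CGSizeMake(300, 200)];
// The paragraph's anchorPoint is (0,1), so position is its top-left corner
paragraph.position = CGPointMake(20, 400);
[self addChild:paragraph];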
I have written a utility method to take a string and divide it up into an array of strings with a given maximum length. It automatically ends each line with a whole word and removes leading whitespace. Hope it helps somebody!
- (NSArray*)linesFromString:(NSString*)string withMaxLineLength:(int)maxLineLength;
{
NSMutableArray *lines = [NSMutableArray arrayWithCapacity:1];
BOOL gotLine = NO;
BOOL doneFormat = NO;
BOOL endOfString = NO;
int innerLoops = 0;
int outerLoops = 0;
int lineIndex = 0;
int currentStringIndex = 0;
int stringLength = (int)[string length];
int rangeLength = maxLineLength;
NSString *line;
NSString *testChar;
NSString *testChar2;
while (!doneFormat) {
outerLoops++;
while (!gotLine) {
endOfString = NO;
innerLoops++;
line = [string substringWithRange:NSMakeRange(currentStringIndex, rangeLength)];
testChar = [line substringWithRange:NSMakeRange(0, 1)];
if (currentStringIndex + rangeLength > [string length] - 1) {
endOfString = YES;
} else {
testChar2 = [string substringWithRange:NSMakeRange(currentStringIndex + rangeLength, 1)];
}
//If the line starts with a space then advance 1 char and try again.
if ([testChar isEqualToString:@" "]) {
currentStringIndex++;
// If we were at the end of the string then reduce the rangeLength as well.
if (endOfString) {
rangeLength--;
}
// else, if this line ends at the end of a word (or the string) then it's good. ie next char in the string is a space.
} else if ([testChar2 isEqualToString:@" "] || endOfString) {
gotLine = YES;
currentStringIndex += [line length];
// else, make the line shorter by one character and try again
} else if (rangeLength > 1){
rangeLength--;
// Otherwise the word takes up more than 1 line so use it all.
} else {
line = [string substringWithRange:NSMakeRange(currentStringIndex, maxLineLength)];
currentStringIndex += [line length];
gotLine = YES;
}
// Make sure we're not stuck in an endless loop
if (innerLoops > 1000) {
NSLog(#"Error: looped too long");
break;
}
}
// If we processed a line, and the line is not nil, add it to our array.
if (gotLine && line) {
[lines insertObject:line atIndex:lineIndex];
lineIndex++;
}
// Reset variables
rangeLength = maxLineLength;
gotLine = NO;
// If the current index is at the end of the string, then we're done.
if (currentStringIndex >= stringLength) {
doneFormat = YES;
// If we have less than a full line left, then reduce the rangeLength to avoid throwing an exception
} else if (stringLength - (currentStringIndex + rangeLength) < 0) {
rangeLength = stringLength - currentStringIndex;
}
// Make sure we're not stuck in an endless loop
if (outerLoops > 1000) {
NSLog(#"Error: Outer-looped too long");
break;
}
}
return lines;
}
And then I just call it and create some label nodes to add to my layer node as follows. I'm aligning my line labels underneath and with the left edge of button2, so it all lines up left justified.
CGFloat fontSize = 30.0f;
int lineCount;
NSString *description = [product localizedDescription];
NSString *line;
NSArray *lines = [self linesFromString:description withMaxLineLength:43];
if (lines) {
lineCount = (int)[lines count];
for (int i = 0; i < lineCount; i++) {
line = [lines objectAtIndex:i];
// Create a new label for each line and add it to my SKSpriteNode layer
SKLabelNode *label = [SKLabelNode labelNodeWithFontNamed:@"Superclarendon-Black"];
label.text = line;
label.fontSize = fontSize;
label.scale = 1.0f;
label.name = @"lineLabel";
label.fontColor = [UIColor blackColor];
label.horizontalAlignmentMode = SKLabelHorizontalAlignmentModeLeft;
label.position = CGPointMake(button2.position.x - button2.size.width * 0.5f, button2.position.y - button2.size.height - i * fontSize * 1.1);
[layer addChild:label];
}
}
In the scene editor, change the SKLabelNode's text from plain to attributed in the Attributes inspector in the right pane.
Doing so gives you a great deal of freedom to customize the displayed text without having to create multiple SKLabelNode instances or a UIImage. For instance, you can lay out an entire paragraph.
For programmatic interaction, use the attributedText property of the label node to add custom attributes.
label.numberOfLines = 0 //equates to multiple lines
label.preferredMaxLayoutWidth = screenWidth
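For the programmatic route, here is a hedged Objective-C sketch (assuming iOS 11+/macOS 10.13+, where SKLabelNode gained attributedText, numberOfLines and preferredMaxLayoutWidth; the font and sizes are placeholders):
NSMutableParagraphStyle *style = [[NSParagraphStyle defaultParagraphStyle] mutableCopy];
style.lineBreakMode = NSLineBreakByWordWrapping;
NSDictionary *attributes = @{ NSFontAttributeName: [UIFont fontWithName:@"Thonburi" size:24],
                              NSForegroundColorAttributeName: [UIColor whiteColor],
                              NSParagraphStyleAttributeName: style };
SKLabelNode *label = [SKLabelNode node];
label.attributedText = [[NSAttributedString alloc] initWithString:@"A whole paragraph\nspread over several lines"
                                                        attributes:attributes];
label.numberOfLines = 0;             // unlimited lines
label.preferredMaxLayoutWidth = 300; // wrap to this width
[self addChild:label];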

How to rapidly create an NSMutableArray with CFDataRef image pixel data in Xcode for iOS

My question is simple. I have the following code; it creates an array of hues obtained from a function that returns the UIColor of a pixel in an image (this is not important, just context). I need to create this array as fast as possible. This test runs with only a 5x5 pixel image and it takes about 3 seconds; I want to be able to run a 50x50 pixel image (at least) in about 2 seconds (tops). Any ideas?
- (void)createArrayOfHues: (UIImage *)imageScaned{
if (imageScaned != nil) {
NSLog(#"Creating Array...");
UIImageView *img = [[UIImageView alloc] initWithFrame:CGRectMake(0, 0, 5, 5)];
img.contentMode = UIViewContentModeScaleToFill;
img.image = imageScaned;
img.contentMode = UIViewContentModeRedraw;
img.hidden = YES;
int i = 0;
CGFloat hue = 0;
CGFloat sat = 0;
CGFloat brit = 0;
CGFloat alph = 0;
CGFloat hue2 = 0;
CGFloat sat2 = 0;
CGFloat brit2 = 0;
CGFloat alph2 = 0;
[_colorsArray removeAllObjects];
[_satForHue removeAllObjects];
[_britForHue removeAllObjects];
[_alphForHue removeAllObjects];
_colorsArray = [[NSMutableArray alloc] initWithCapacity:(25)];
_satForHue = [[NSMutableArray alloc] initWithCapacity:(25)];
_britForHue = [[NSMutableArray alloc] initWithCapacity:(25)];
_alphForHue = [[NSMutableArray alloc] initWithCapacity:(25)];
while (i<25) {
for (int y=1; y <= 5; y++){
for (int x = 1; x <= 2.5; x++){
if (x != (5-x)){
UIColor *color = [self colorMatch:imageScaned :x :y];
UIColor *color2 = [self colorMatch:imageScaned :(5-x) :y];
if([color getHue:&hue saturation:&sat brightness:&brit alpha:&alph] && [color2 getHue:&hue2 saturation:&sat2 brightness:&brit2 alpha:&alph2]){
NSNumber *hueId = [NSNumber numberWithFloat:(float)hue];
NSNumber *satId = [NSNumber numberWithFloat:(float)sat];
NSNumber *britId = [NSNumber numberWithFloat:(float)brit];
NSNumber *alphId = [NSNumber numberWithFloat:(float)alph];
NSNumber *hueId2 = [NSNumber numberWithFloat:(float)hue2];
NSNumber *satId2 = [NSNumber numberWithFloat:(float)sat2];
NSNumber *britId2 = [NSNumber numberWithFloat:(float)brit2];
NSNumber *alphId2 = [NSNumber numberWithFloat:(float)alph2];
[_colorsArray insertObject:hueId atIndex:i];
[_satForHue insertObject:satId atIndex:i];
[_britForHue insertObject:britId atIndex:i];
[_alphForHue insertObject:alphId atIndex:i];
[_colorsArray insertObject:hueId2 atIndex:(i+1)];
[_satForHue insertObject:satId2 atIndex:(i+1)];
[_britForHue insertObject:britId2 atIndex:(i+1)];
[_alphForHue insertObject:alphId2 atIndex:(i+1)];
}
NSLog(#"color inserted at %i with x: %i and y: %i" , i , x, y);
i++;
}else {
UIColor *color = [self colorMatch:imageScaned :x :y];
if([color getHue:&hue saturation:&sat brightness:&brit alpha:&alph]){
NSNumber *hueId = [NSNumber numberWithFloat:(float)hue];
NSNumber *satId = [NSNumber numberWithFloat:(float)sat];
NSNumber *britId = [NSNumber numberWithFloat:(float)brit];
NSNumber *alphId = [NSNumber numberWithFloat:(float)alph];
[_colorsArray insertObject:hueId atIndex:i];
[_satForHue insertObject:satId atIndex:i];
[_britForHue insertObject:britId atIndex:i];
[_alphForHue insertObject:alphId atIndex:i];
}
}
}
}
}
NSLog(#"Returns the array");
}else{
NSLog(#"Returns nothing");
}
}
The code for colorMatch:
- (UIColor *) colorMatch: (UIImage *)image :(int) x :(int) y {
isBlackColored = NO;
if (image == nil){
NSUserDefaults *defaults = [NSUserDefaults standardUserDefaults];
BOOL customColor = [defaults boolForKey:@"custom_color"];
if (customColor){
float red = [defaults floatForKey:@"custom_color_slider_red"];
float green = [defaults floatForKey:@"custom_color_slider_green"];
float blue = [defaults floatForKey:@"custom_color_slider_blue"];
return [UIColor colorWithRed:red green:green blue:blue alpha:1];
}else
isDefaultS = YES;
}
else{
CFDataRef pixelData = CGDataProviderCopyData(CGImageGetDataProvider(image.CGImage));
const UInt8* data = CFDataGetBytePtr(pixelData);
int pixelInfo = ((image.size.width * y) + x ) * 4;
UInt8 red = data[pixelInfo];
UInt8 green = data[(pixelInfo + 1)];
UInt8 blue = data[pixelInfo + 2];
UInt8 alpha = data[pixelInfo + 3];
CFRelease(pixelData);
float redC = red/255.0f;
float greenC = green/255.0f;
float blueC = blue/255.0f;
UIColor* color = [UIColor colorWithRed:redC green:greenC blue:blueC alpha:alpha/255.0f];
return color;
}
return nil;
}
I think your main performance bottleneck is not the initialization of NSMutableArray instances, but the way you index your image:
UIColor *color = [self colorMatch:imageScaned :x :y];
I guess this method converts the UIImage to a CGImageRef, copies its data, indexes it, then destroys/releases these temporary objects, or something like this - for every single pixel...
You should refactor this code to get hold of the image buffer only once, and then work with it like a regular C pointer/array. If that doesn't solve your performance problem, you should do some profiling.
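A rough sketch of that refactoring (assuming a 32-bit RGBA bitmap as in colorMatch: above; the method name readHuesFromImage: is made up for illustration):
- (void)readHuesFromImage:(UIImage *)image {
    // Copy the pixel data once, instead of once per pixel
    CFDataRef pixelData = CGDataProviderCopyData(CGImageGetDataProvider(image.CGImage));
    const UInt8 *bytes = CFDataGetBytePtr(pixelData);
    const int width = (int)CGImageGetWidth(image.CGImage);
    const int height = (int)CGImageGetHeight(image.CGImage);
    NSMutableArray *hues = [NSMutableArray arrayWithCapacity:width * height];
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            const int pixelInfo = (width * y + x) * 4; // RGBA, 4 bytes per pixel
            UIColor *color = [UIColor colorWithRed:bytes[pixelInfo] / 255.0f
                                             green:bytes[pixelInfo + 1] / 255.0f
                                              blue:bytes[pixelInfo + 2] / 255.0f
                                             alpha:bytes[pixelInfo + 3] / 255.0f];
            CGFloat hue, sat, brit, alph;
            if ([color getHue:&hue saturation:&sat brightness:&brit alpha:&alph]) {
                [hues addObject:@(hue)]; // collect saturation/brightness/alpha the same way if needed
            }
        }
    }
    CFRelease(pixelData); // released once, after the whole loop
}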

WaveForm on IOS

I'm looking for how to draw the sound amplitude.
I found http://supermegaultragroovy.com/2009/10/06/drawing-waveforms/ but I have some problems. How do I get a list of floating-point values representing the audio?
Thanks all.
I found this example here: Drawing waveform with AVAssetReader. I changed it and developed a new class based on it.
This class returns a UIImageView.
//.h file
#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>
@interface WaveformImageVew : UIImageView{
}
-(id)initWithUrl:(NSURL*)url;
- (NSData *) renderPNGAudioPictogramLogForAssett:(AVURLAsset *)songAsset;
@end
//.m file
#import "WaveformImageVew.h"
#define absX(x) (x<0?0-x:x)
#define minMaxX(x,mn,mx) (x<=mn?mn:(x>=mx?mx:x))
#define noiseFloor (-50.0)
#define decibel(amplitude) (20.0 * log10(absX(amplitude)/32767.0))
#define imgExt @"png"
#define imageToData(x) UIImagePNGRepresentation(x)
@implementation WaveformImageVew
-(id)initWithUrl:(NSURL*)url{
if(self = [super init]){
AVURLAsset * urlA = [AVURLAsset URLAssetWithURL:url options:nil];
[self setImage:[UIImage imageWithData:[self renderPNGAudioPictogramLogForAssett:urlA]]];
}
return self;
}
-(UIImage *) audioImageLogGraph:(Float32 *) samples
normalizeMax:(Float32) normalizeMax
sampleCount:(NSInteger) sampleCount
channelCount:(NSInteger) channelCount
imageHeight:(float) imageHeight {
CGSize imageSize = CGSizeMake(sampleCount, imageHeight);
UIGraphicsBeginImageContext(imageSize);
CGContextRef context = UIGraphicsGetCurrentContext();
CGContextSetFillColorWithColor(context, [UIColor blackColor].CGColor);
CGContextSetAlpha(context,1.0);
CGRect rect;
rect.size = imageSize;
rect.origin.x = 0;
rect.origin.y = 0;
CGColorRef leftcolor = [[UIColor whiteColor] CGColor];
CGColorRef rightcolor = [[UIColor redColor] CGColor];
CGContextFillRect(context, rect);
CGContextSetLineWidth(context, 1.0);
float halfGraphHeight = (imageHeight / 2) / (float) channelCount ;
float centerLeft = halfGraphHeight;
float centerRight = (halfGraphHeight*3) ;
float sampleAdjustmentFactor = (imageHeight/ (float) channelCount) / (normalizeMax - noiseFloor) / 2;
for (NSInteger intSample = 0 ; intSample < sampleCount ; intSample ++ ) {
Float32 left = *samples++;
float pixels = (left - noiseFloor) * sampleAdjustmentFactor;
CGContextMoveToPoint(context, intSample, centerLeft-pixels);
CGContextAddLineToPoint(context, intSample, centerLeft+pixels);
CGContextSetStrokeColorWithColor(context, leftcolor);
CGContextStrokePath(context);
if (channelCount==2) {
Float32 right = *samples++;
float pixels = (right - noiseFloor) * sampleAdjustmentFactor;
CGContextMoveToPoint(context, intSample, centerRight - pixels);
CGContextAddLineToPoint(context, intSample, centerRight + pixels);
CGContextSetStrokeColorWithColor(context, rightcolor);
CGContextStrokePath(context);
}
}
// Create new image
UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
// Tidy up
UIGraphicsEndImageContext();
return newImage;
}
- (NSData *) renderPNGAudioPictogramLogForAssett:(AVURLAsset *)songAsset {
NSError * error = nil;
AVAssetReader * reader = [[AVAssetReader alloc] initWithAsset:songAsset error:&error];
AVAssetTrack * songTrack = [songAsset.tracks objectAtIndex:0];
NSDictionary* outputSettingsDict = [[NSDictionary alloc] initWithObjectsAndKeys:
[NSNumber numberWithInt:kAudioFormatLinearPCM],AVFormatIDKey,
// [NSNumber numberWithInt:44100.0],AVSampleRateKey, /*Not Supported*/
// [NSNumber numberWithInt: 2],AVNumberOfChannelsKey, /*Not Supported*/
[NSNumber numberWithInt:16],AVLinearPCMBitDepthKey,
[NSNumber numberWithBool:NO],AVLinearPCMIsBigEndianKey,
[NSNumber numberWithBool:NO],AVLinearPCMIsFloatKey,
[NSNumber numberWithBool:NO],AVLinearPCMIsNonInterleaved,
nil];
AVAssetReaderTrackOutput* output = [[AVAssetReaderTrackOutput alloc] initWithTrack:songTrack outputSettings:outputSettingsDict];
[reader addOutput:output];
[output release];
UInt32 sampleRate,channelCount;
NSArray* formatDesc = songTrack.formatDescriptions;
for(unsigned int i = 0; i < [formatDesc count]; ++i) {
CMAudioFormatDescriptionRef item = (CMAudioFormatDescriptionRef)[formatDesc objectAtIndex:i];
const AudioStreamBasicDescription* fmtDesc = CMAudioFormatDescriptionGetStreamBasicDescription (item);
if(fmtDesc ) {
sampleRate = fmtDesc->mSampleRate;
channelCount = fmtDesc->mChannelsPerFrame;
// NSLog(#"channels:%u, bytes/packet: %u, sampleRate %f",fmtDesc->mChannelsPerFrame, fmtDesc->mBytesPerPacket,fmtDesc->mSampleRate);
}
}
UInt32 bytesPerSample = 2 * channelCount;
Float32 normalizeMax = noiseFloor;
NSLog(#"normalizeMax = %f",normalizeMax);
NSMutableData * fullSongData = [[NSMutableData alloc] init];
[reader startReading];
UInt64 totalBytes = 0;
Float64 totalLeft = 0;
Float64 totalRight = 0;
Float32 sampleTally = 0;
NSInteger samplesPerPixel = sampleRate / 50;
while (reader.status == AVAssetReaderStatusReading){
AVAssetReaderTrackOutput * trackOutput = (AVAssetReaderTrackOutput *)[reader.outputs objectAtIndex:0];
CMSampleBufferRef sampleBufferRef = [trackOutput copyNextSampleBuffer];
if (sampleBufferRef){
CMBlockBufferRef blockBufferRef = CMSampleBufferGetDataBuffer(sampleBufferRef);
size_t length = CMBlockBufferGetDataLength(blockBufferRef);
totalBytes += length;
NSAutoreleasePool *wader = [[NSAutoreleasePool alloc] init];
NSMutableData * data = [NSMutableData dataWithLength:length];
CMBlockBufferCopyDataBytes(blockBufferRef, 0, length, data.mutableBytes);
SInt16 * samples = (SInt16 *) data.mutableBytes;
int sampleCount = length / bytesPerSample;
for (int i = 0; i < sampleCount ; i ++) {
Float32 left = (Float32) *samples++;
left = decibel(left);
left = minMaxX(left,noiseFloor,0);
totalLeft += left;
Float32 right;
if (channelCount==2) {
right = (Float32) *samples++;
right = decibel(right);
right = minMaxX(right,noiseFloor,0);
totalRight += right;
}
sampleTally++;
if (sampleTally > samplesPerPixel) {
left = totalLeft / sampleTally;
if (left > normalizeMax) {
normalizeMax = left;
}
// NSLog(#"left average = %f, normalizeMax = %f",left,normalizeMax);
[fullSongData appendBytes:&left length:sizeof(left)];
if (channelCount==2) {
right = totalRight / sampleTally;
if (right > normalizeMax) {
normalizeMax = right;
}
[fullSongData appendBytes:&right length:sizeof(right)];
}
totalLeft = 0;
totalRight = 0;
sampleTally = 0;
}
}
[wader drain];
CMSampleBufferInvalidate(sampleBufferRef);
CFRelease(sampleBufferRef);
}
}
NSData * finalData = nil;
if (reader.status == AVAssetReaderStatusFailed || reader.status == AVAssetReaderStatusUnknown){
// Something went wrong. Handle it.
}
if (reader.status == AVAssetReaderStatusCompleted){
// You're done. It worked.
NSLog(#"rendering output graphics using normalizeMax %f",normalizeMax);
UIImage *test = [self audioImageLogGraph:(Float32 *) fullSongData.bytes
normalizeMax:normalizeMax
sampleCount:fullSongData.length / (sizeof(Float32) * 2)
channelCount:2
imageHeight:100];
finalData = imageToData(test);
}
[fullSongData release];
[reader release];
return finalData;
}
@end
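A minimal usage sketch (assuming a bundled audio file; the frame is arbitrary, and note the class above uses manual retain/release):
NSURL *audioURL = [[NSBundle mainBundle] URLForResource:@"song" withExtension:@"m4a"];
WaveformImageVew *waveformView = [[WaveformImageVew alloc] initWithUrl:audioURL];
waveformView.frame = CGRectMake(0, 100, self.view.bounds.size.width, 100);
[self.view addSubview:waveformView]; // release waveformView afterwards if you are not using ARC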
I've been reading your question and created a control for this.
Code here:
https://github.com/fulldecent/FDWaveformView
Discussion here:
https://www.cocoacontrols.com/controls/fdwaveformview
UPDATE 2015-01-29: This project is going strong and making consistent releases. Thanks to SO for all the exposure!
I can give you a reference to the one that I have implemented in my application. It was Apple's example. Here is Apple's aurioTouch example, which analyzes audio in 3 different ways. Apple still has not provided a way to analyze audio waves directly... so this example also uses the mic to analyze the sound.
Among the 3, I have used only the oscilloscope for analyzing the amplitude effect. I had to change that code drastically to match my requirements, so best of luck if you are going to use it...
You can also see one more example using such amplitude analysis: Apple's SpeakHere.
This is the code I have used to convert my audio data (audio file) into a floating-point representation and save it into an array.
-(void) PrintFloatDataFromAudioFile {
NSString * name = @"Filename"; //YOUR FILE NAME
NSString * source = [[NSBundle mainBundle] pathForResource:name ofType:@"m4a"]; // SPECIFY YOUR FILE FORMAT
const char *cString = [source cStringUsingEncoding:NSASCIIStringEncoding];
CFStringRef str = CFStringCreateWithCString(
NULL,
cString,
kCFStringEncodingMacRoman
);
CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,
str,
kCFURLPOSIXPathStyle,
false
);
ExtAudioFileRef fileRef;
ExtAudioFileOpenURL(inputFileURL, &fileRef);
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100; // GIVE YOUR SAMPLING RATE
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
audioFormat.mChannelsPerFrame = 1; // Mono
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
audioFormat.mFramesPerPacket = 1;
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)
// 3) Apply audio format to the Extended Audio File
ExtAudioFileSetProperty(
fileRef,
kExtAudioFileProperty_ClientDataFormat,
sizeof (AudioStreamBasicDescription), //= audioFormat
&audioFormat);
int numSamples = 1024; //How many samples to read in at a time
UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32) = 32bytes
UInt32 packetsPerBuffer = numSamples;
UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;
// So the lvalue of outputBuffer is the memory location where we have reserved space
UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize);
AudioBufferList convertedData ;//= malloc(sizeof(convertedData));
convertedData.mNumberBuffers = 1; // Set this to 1 for mono
convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
convertedData.mBuffers[0].mData = outputBuffer; //
UInt32 frameCount = numSamples;
float *samplesAsCArray;
int j =0;
double floatDataArray[882000] ; // SPECIFY YOUR DATA LIMIT MINE WAS 882000 , SHOULD BE EQUAL TO OR MORE THAN DATA LIMIT
while (frameCount > 0) {
ExtAudioFileRead(
fileRef,
&frameCount,
&convertedData
);
if (frameCount > 0) {
AudioBuffer audioBuffer = convertedData.mBuffers[0];
samplesAsCArray = (float *)audioBuffer.mData; // CAST YOUR mData INTO FLOAT
for (int i =0; i<1024 /*numSamples */; i++) { //YOU CAN PUT numSamples INTEAD OF 1024
floatDataArray[j] = (double)samplesAsCArray[i] ; //PUT YOUR DATA INTO FLOAT ARRAY
printf("\n%f",floatDataArray[j]); //PRINT YOUR ARRAY'S DATA IN FLOAT FORM RANGING -1 TO +1
j++;
}
}
}}
This is my answer, thanks to all you geeks.
Objective-C; here is the code:
-(void) listenerData:(NSNotification *) notify
{
int resnum=112222;
unsigned int bitsum=0;
for(int i=0;i<4;i++)
{
bitsum+=(resnum>>(i*8))&0xff;
}
bitsum=bitsum&0xff;
NSString * check=[NSString stringWithFormat:@"%x %x",resnum,bitsum];
check=nil;
self.drawData=notify.object;
[self setNeedsDisplay];
}
-(void)drawRect:(CGRect)rect
{
NSArray *data=self.drawData;
NSData *tdata=[data objectAtIndex:0];
double *srcdata=(double*)tdata.bytes;
int datacount=tdata.length/sizeof(double);
tdata=[data objectAtIndex:1];
double *coveddata=(double*)tdata.bytes;
CGContextRef context=UIGraphicsGetCurrentContext();
CGRect bounds=self.bounds;
CGContextClearRect(context, bounds);
CGFloat midpos=bounds.size.height/2;
CGContextBeginPath(context);
const double scale=1.0/100;
CGFloat step=bounds.size.width/(datacount-1);
CGContextMoveToPoint(context, 0, midpos);
CGFloat xpos=0;
for(int i=0;i<datacount;i++)
{
CGContextAddLineToPoint(context, xpos, midpos-srcdata[i]*scale);
xpos+=step;
}
CGContextAddLineToPoint(context, bounds.size.width, midpos);
CGContextClosePath(context);
CGContextSetRGBFillColor(context, 1.0, 0.0, 0.0, 1.0);
CGContextFillPath(context);
CGContextBeginPath(context);
const double scale2=1.0/100;
CGContextMoveToPoint(context, 0, midpos);
xpos=0;
for(int i=0;i<datacount;i++)
{
CGContextAddLineToPoint(context, xpos, midpos+coveddata[i]*scale2);
xpos+=step;
}
CGContextAddLineToPoint(context, bounds.size.width, midpos);
CGContextClosePath(context);
CGContextSetRGBFillColor(context, 1.0, 0.0, 1.0, 1.0);
CGContextFillPath(context);
}