How to get video frame of the AVPlayer? - objective-c

I have a PlayerView class for displaying an AVPlayer's playback. The code is from the documentation.
#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>

@interface PlayerView : UIView
@property (nonatomic) AVPlayer *player;
@end

@implementation PlayerView
+ (Class)layerClass {
    return [AVPlayerLayer class];
}
- (AVPlayer *)player {
    return [(AVPlayerLayer *)[self layer] player];
}
- (void)setPlayer:(AVPlayer *)player {
    [(AVPlayerLayer *)[self layer] setPlayer:player];
}
@end
I set up my AVPlayer (containing a video asset with size 320x240) in this PlayerView (with frame.size.width = 100, frame.size.height = 100), and my video is resized. How can I get the size of the video after it has been added to the PlayerView?

iOS 7.0 added a new feature: AVPlayerLayer has a videoRect property.
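For example, in Swift (a minimal sketch, assuming playerView is the PlayerView from the question, whose backing layer is an AVPlayerLayer):

if let playerLayer = playerView.layer as? AVPlayerLayer {
    // The rectangle the video actually occupies inside the layer;
    // it stays empty until the first frame is ready for display.
    let videoFrame = playerLayer.videoRect
    print("Video is rendered in \(videoFrame)")
}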

This worked for me when you don't have access to the AVPlayerLayer.
- (CGRect)videoRect {
    // @see http://stackoverflow.com/a/6565988/1545158
    AVAssetTrack *track = [[self.player.currentItem.asset tracksWithMediaType:AVMediaTypeVideo] firstObject];
    if (!track) {
        return CGRectZero;
    }
    CGSize trackSize = [track naturalSize];
    CGSize videoViewSize = self.videoView.bounds.size;
    CGFloat trackRatio = trackSize.width / trackSize.height;
    CGFloat videoViewRatio = videoViewSize.width / videoViewSize.height;
    CGSize newSize;
    if (videoViewRatio > trackRatio) {
        newSize = CGSizeMake(trackSize.width * videoViewSize.height / trackSize.height, videoViewSize.height);
    } else {
        newSize = CGSizeMake(videoViewSize.width, trackSize.height * videoViewSize.width / trackSize.width);
    }
    CGFloat newX = (videoViewSize.width - newSize.width) / 2;
    CGFloat newY = (videoViewSize.height - newSize.height) / 2;
    return CGRectMake(newX, newY, newSize.width, newSize.height);
}

Found a solution: add this to the PlayerView class:
- (CGRect)videoContentFrame {
    AVPlayerLayer *avLayer = (AVPlayerLayer *)[self layer];
    // AVPlayerLayerContentLayer
    CALayer *layer = (CALayer *)[[avLayer sublayers] objectAtIndex:0];
    CGRect transformedBounds = CGRectApplyAffineTransform(layer.bounds, CATransform3DGetAffineTransform(layer.sublayerTransform));
    return transformedBounds;
}

Here's the solution that's working for me; it takes into account the positioning of the AVPlayer within the view. I just added this to the custom PlayerView class. I had to solve this because videoRect doesn't appear to work in OS X 10.7.
- (NSRect)videoRect {
    NSRect theVideoRect = NSMakeRect(0, 0, 0, 0);
    NSRect theLayerRect = self.playerLayer.frame;
    NSSize theNaturalSize = NSSizeFromCGSize([[[self.movie asset] tracksWithMediaType:AVMediaTypeVideo][0] naturalSize]);
    float movieAspectRatio = theNaturalSize.width / theNaturalSize.height;
    float viewAspectRatio = theLayerRect.size.width / theLayerRect.size.height;
    if (viewAspectRatio < movieAspectRatio) {
        theVideoRect.size.width = theLayerRect.size.width;
        theVideoRect.size.height = theLayerRect.size.width / movieAspectRatio;
        theVideoRect.origin.x = 0;
        theVideoRect.origin.y = (theLayerRect.size.height / 2) - (theVideoRect.size.height / 2);
    }
    else if (viewAspectRatio > movieAspectRatio) {
        theVideoRect.size.width = movieAspectRatio * theLayerRect.size.height;
        theVideoRect.size.height = theLayerRect.size.height;
        theVideoRect.origin.x = (theLayerRect.size.width / 2) - (theVideoRect.size.width / 2);
        theVideoRect.origin.y = 0;
    }
    return theVideoRect;
}

Here is Eric Badros' answer ported to iOS. I also added preferredTransform handling. This assumes _player is an AVPlayer.
- (CGRect)videoRect {
    CGRect theVideoRect = CGRectZero;
    // Replace this with whatever frame your AVPlayer is playing inside of:
    CGRect theLayerRect = self.playerLayer.frame;

    AVAssetTrack *track = [_player.currentItem.asset tracksWithMediaType:AVMediaTypeVideo][0];
    CGSize theNaturalSize = [track naturalSize];
    theNaturalSize = CGSizeApplyAffineTransform(theNaturalSize, track.preferredTransform);
    theNaturalSize.width = fabs(theNaturalSize.width);
    theNaturalSize.height = fabs(theNaturalSize.height);

    CGFloat movieAspectRatio = theNaturalSize.width / theNaturalSize.height;
    CGFloat viewAspectRatio = theLayerRect.size.width / theLayerRect.size.height;

    // Note: change this *greater than* to a *less than* if your video will play in aspect fit mode (as opposed to aspect fill mode)
    if (viewAspectRatio > movieAspectRatio) {
        theVideoRect.size.width = theLayerRect.size.width;
        theVideoRect.size.height = theLayerRect.size.width / movieAspectRatio;
        theVideoRect.origin.x = 0;
        theVideoRect.origin.y = (theLayerRect.size.height - theVideoRect.size.height) / 2;
    } else if (viewAspectRatio < movieAspectRatio) {
        theVideoRect.size.width = movieAspectRatio * theLayerRect.size.height;
        theVideoRect.size.height = theLayerRect.size.height;
        theVideoRect.origin.x = (theLayerRect.size.width - theVideoRect.size.width) / 2;
        theVideoRect.origin.y = 0;
    }
    return theVideoRect;
}

Here is a Swift solution based on Andrey Banshchikov's answer. His solution is especially useful when you don't have access to the AVPlayerLayer.
func currentVideoFrameSize(playerView: AVPlayerView, player: AVPlayer) -> CGSize {
    // See https://stackoverflow.com/a/40164496/1877617
    let track = player.currentItem?.asset.tracks(withMediaType: .video).first
    if let track = track {
        let trackSize = track.naturalSize
        let videoViewSize = playerView.bounds.size
        let trackRatio = trackSize.width / trackSize.height
        let videoViewRatio = videoViewSize.width / videoViewSize.height
        var newSize: CGSize
        if videoViewRatio > trackRatio {
            newSize = CGSize(width: trackSize.width * videoViewSize.height / trackSize.height, height: videoViewSize.height)
        } else {
            newSize = CGSize(width: videoViewSize.width, height: trackSize.height * videoViewSize.width / trackSize.width)
        }
        return newSize
    }
    return CGSize.zero
}
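Usage is then straightforward (playerView and player being whatever instances you already have):

let frameSize = currentVideoFrameSize(playerView: playerView, player: player)
print("Current video frame size: \(frameSize)")

Note that this returns only the size; Andrey's Objective-C original also computes the origin, which you can re-add the same way if you need the full rect.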

Andrey's answer worked for local/downloaded video files, but didn't work for streamed HLS video. As a second attempt, I was able to find the video track right inside the "tracks" property of currentItem. Also rewritten in Swift.
private func videoRect() -> CGRect {
    // Based on https://stackoverflow.com/a/40164496 - originally Objective-C
    var trackTop: AVAssetTrack? = nil
    if let track1 = self.player?.currentItem?.asset.tracks(withMediaType: .video).first {
        trackTop = track1
    } else {
        // For some reason the above way wouldn't find the "track" for streamed HLS video.
        // This seems to work for streamed HLS.
        if let tracks = self.player?.currentItem?.tracks {
            for playerItemTrack in tracks {
                if let assetTrack = playerItemTrack.assetTrack, assetTrack.mediaType == .video {
                    // Found an assetTrack here
                    trackTop = assetTrack
                    break
                }
            }
        }
    }
    guard let track = trackTop else {
        print("Failed getting track")
        return CGRect.zero
    }
    let trackSize = track.naturalSize
    let videoViewSize = self.view.bounds.size
    let trackRatio = trackSize.width / trackSize.height
    let videoViewRatio = videoViewSize.width / videoViewSize.height
    let newSize: CGSize
    if videoViewRatio > trackRatio {
        newSize = CGSize(width: trackSize.width * videoViewSize.height / trackSize.height, height: videoViewSize.height)
    } else {
        newSize = CGSize(width: videoViewSize.width, height: trackSize.height * videoViewSize.width / trackSize.width)
    }
    let newX: CGFloat = (videoViewSize.width - newSize.width) / 2
    let newY: CGFloat = (videoViewSize.height - newSize.height) / 2
    return CGRect(x: newX, y: newY, width: newSize.width, height: newSize.height)
}
Much simpler approach
I found another way that seems much easier, if you're using AVPlayerViewController. Why didn't I find this earlier? 🤧
return self.videoBounds
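In context (a sketch, assuming playerViewController is your AVPlayerViewController instance):

// videoBounds is the current size and position of the video image
// within the player view controller's view, in its coordinate system.
let videoFrame = playerViewController.videoBounds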

2022 SwiftUI
import SwiftUI
import AVKit

struct PlayerViewController: UIViewControllerRepresentable {
    private let avController = AVPlayerViewController()
    var videoURL: URL?

    private var player: AVPlayer {
        return AVPlayer(url: videoURL!)
    }

    // 👇🏼👇🏼👇🏼 HERE 👇🏼👇🏼👇🏼
    func getVideoFrame() -> CGRect {
        self.avController.videoBounds
    }
    // 👆🏼👆🏼👆🏼 HERE 👆🏼👆🏼👆🏼

    func makeUIViewController(context: Context) -> AVPlayerViewController {
        avController.modalPresentationStyle = .fullScreen
        avController.player = player
        avController.player?.play()
        return avController
    }

    func updateUIViewController(_ playerController: AVPlayerViewController, context: Context) {}
}
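A hypothetical usage from SwiftUI (the URL is just a placeholder):

import SwiftUI

struct ContentView: View {
    var body: some View {
        PlayerViewController(videoURL: URL(string: "https://example.com/video.mp4"))
            .ignoresSafeArea()
    }
}

Note that getVideoFrame() only returns a meaningful rect once the controller is on screen and the video is ready for display; before that, videoBounds may be .zero.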

Related

Creating parallax focus effect on UICollectionViewCell

How do you create the parallax focus effect on a collection view cell with a custom view? If I were using an image view, the property to set would be adjustsImageWhenAncestorFocused, but my collection view cell contains a subclassed UIView with custom content drawn using Core Graphics.
The answer by @raulriera is nice, but only shifts the cell around in 2D.
Also, the OP asked for an Objective-C example.
I was also looking to do this effect for the exact same reason - I had UICollectionView with cells containing images and labels.
I created a UIMotionEffectGroup subclass, since getting near to the Apple TV effect seems to require four different motion effects. The first two are the flat movements as in @raulriera's answer, and the other two are the 3D rotations.
Just the shiny environment layer to go now. Any takers? :-)
Here is my code for the motion effect group:
(The shiftDistance and tiltAngle constants set the magnitude of the effect. The given values look pretty similar to the Apple TV effect.)
#import <UIKit/UIKit.h>
#import "UIAppleTVMotionEffectGroup.h"

@implementation UIAppleTVMotionEffectGroup

- (id)init
{
    if ((self = [super init]) != nil)
    {
        // Size of shift movements
        CGFloat const shiftDistance = 10.0f;

        // Make horizontal movements shift the centre left and right
        UIInterpolatingMotionEffect *xShift = [[UIInterpolatingMotionEffect alloc]
            initWithKeyPath:@"center.x"
                       type:UIInterpolatingMotionEffectTypeTiltAlongHorizontalAxis];
        xShift.minimumRelativeValue = [NSNumber numberWithFloat:shiftDistance * -1.0f];
        xShift.maximumRelativeValue = [NSNumber numberWithFloat:shiftDistance];

        // Make vertical movements shift the centre up and down
        UIInterpolatingMotionEffect *yShift = [[UIInterpolatingMotionEffect alloc]
            initWithKeyPath:@"center.y"
                       type:UIInterpolatingMotionEffectTypeTiltAlongVerticalAxis];
        yShift.minimumRelativeValue = [NSNumber numberWithFloat:shiftDistance * -1.0f];
        yShift.maximumRelativeValue = [NSNumber numberWithFloat:shiftDistance];

        // Size of tilt movements
        CGFloat const tiltAngle = M_PI_4 * 0.125;

        // Now make horizontal movements effect a rotation about the Y axis for side-to-side rotation.
        UIInterpolatingMotionEffect *xTilt = [[UIInterpolatingMotionEffect alloc] initWithKeyPath:@"layer.transform" type:UIInterpolatingMotionEffectTypeTiltAlongHorizontalAxis];

        // CATransform3D value for minimumRelativeValue
        CATransform3D transMinimumTiltAboutY = CATransform3DIdentity;
        transMinimumTiltAboutY.m34 = 1.0 / 500;
        transMinimumTiltAboutY = CATransform3DRotate(transMinimumTiltAboutY, tiltAngle * -1.0, 0, 1, 0);

        // CATransform3D value for maximumRelativeValue
        CATransform3D transMaximumTiltAboutY = CATransform3DIdentity;
        transMaximumTiltAboutY.m34 = 1.0 / 500;
        transMaximumTiltAboutY = CATransform3DRotate(transMaximumTiltAboutY, tiltAngle, 0, 1, 0);

        // Set the transform property boundaries for the interpolation
        xTilt.minimumRelativeValue = [NSValue valueWithCATransform3D:transMinimumTiltAboutY];
        xTilt.maximumRelativeValue = [NSValue valueWithCATransform3D:transMaximumTiltAboutY];

        // Now make vertical movements effect a rotation about the X axis for up and down rotation.
        UIInterpolatingMotionEffect *yTilt = [[UIInterpolatingMotionEffect alloc] initWithKeyPath:@"layer.transform" type:UIInterpolatingMotionEffectTypeTiltAlongVerticalAxis];

        // CATransform3D value for minimumRelativeValue
        CATransform3D transMinimumTiltAboutX = CATransform3DIdentity;
        transMinimumTiltAboutX.m34 = 1.0 / 500;
        transMinimumTiltAboutX = CATransform3DRotate(transMinimumTiltAboutX, tiltAngle * -1.0, 1, 0, 0);

        // CATransform3D value for maximumRelativeValue
        CATransform3D transMaximumTiltAboutX = CATransform3DIdentity;
        transMaximumTiltAboutX.m34 = 1.0 / 500;
        transMaximumTiltAboutX = CATransform3DRotate(transMaximumTiltAboutX, tiltAngle, 1, 0, 0);

        // Set the transform property boundaries for the interpolation
        yTilt.minimumRelativeValue = [NSValue valueWithCATransform3D:transMinimumTiltAboutX];
        yTilt.maximumRelativeValue = [NSValue valueWithCATransform3D:transMaximumTiltAboutX];

        // Add all of the motion effects to this group
        self.motionEffects = @[xShift, yShift, xTilt, yTilt];

        // (Manual retain/release; remove these under ARC)
        [xShift release];
        [yShift release];
        [xTilt release];
        [yTilt release];
    }
    return self;
}
@end
I used it like this in my custom UICollectionViewCell subclass:
@implementation MyCollectionViewCell

- (void)didUpdateFocusInContext:(UIFocusUpdateContext *)context withAnimationCoordinator:(UIFocusAnimationCoordinator *)coordinator
{
    // Create a static instance of the motion effect group (could do this anywhere, really, maybe init would be better - we only need one of them.)
    static UIAppleTVMotionEffectGroup *s_atvMotionEffect = nil;
    if (s_atvMotionEffect == nil)
    {
        s_atvMotionEffect = [[UIAppleTVMotionEffectGroup alloc] init];
    }
    [coordinator addCoordinatedAnimations:^{
        if (self.focused)
        {
            [self addMotionEffect:s_atvMotionEffect];
        }
        else
        {
            [self removeMotionEffect:s_atvMotionEffect];
        }
    } completion:^{
    }];
}
@end
All you need to do is add a UIMotionEffect to your subviews. Something like this:
override func didUpdateFocusInContext(context: UIFocusUpdateContext, withAnimationCoordinator coordinator: UIFocusAnimationCoordinator) {
    coordinator.addCoordinatedAnimations({ [unowned self] in
        if self.focused {
            let verticalMotionEffect = UIInterpolatingMotionEffect(keyPath: "center.y", type: .TiltAlongVerticalAxis)
            verticalMotionEffect.minimumRelativeValue = -10
            verticalMotionEffect.maximumRelativeValue = 10

            let horizontalMotionEffect = UIInterpolatingMotionEffect(keyPath: "center.x", type: .TiltAlongHorizontalAxis)
            horizontalMotionEffect.minimumRelativeValue = -10
            horizontalMotionEffect.maximumRelativeValue = 10

            let motionEffectGroup = UIMotionEffectGroup()
            motionEffectGroup.motionEffects = [horizontalMotionEffect, verticalMotionEffect]

            yourView.addMotionEffect(motionEffectGroup)
        }
        else {
            // Remove the effect here
        }
    }, completion: nil)
}
I've converted Simon Tillson's answer to Swift 3.0 and posted it here to save typing for people in the future. Thanks very much for a great solution.
class UIAppleTVMotionEffectGroup: UIMotionEffectGroup {
    // Size of shift movements
    let shiftDistance: CGFloat = 10.0
    let tiltAngle: CGFloat = CGFloat(M_PI_4) * 0.125

    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
    }

    override init() {
        super.init()

        // Make horizontal movements shift the centre left and right
        let xShift = UIInterpolatingMotionEffect(keyPath: "center.x", type: .tiltAlongHorizontalAxis)
        xShift.minimumRelativeValue = shiftDistance * -1.0
        xShift.maximumRelativeValue = shiftDistance

        let yShift = UIInterpolatingMotionEffect(keyPath: "center.y", type: .tiltAlongVerticalAxis)
        yShift.minimumRelativeValue = 0.0 - shiftDistance
        yShift.maximumRelativeValue = shiftDistance

        let xTilt = UIInterpolatingMotionEffect(keyPath: "layer.transform", type: .tiltAlongHorizontalAxis)
        var transMinimumTiltAboutY = CATransform3DIdentity
        transMinimumTiltAboutY.m34 = 1.0 / 500.0
        transMinimumTiltAboutY = CATransform3DRotate(transMinimumTiltAboutY, tiltAngle * -1.0, 0, 1, 0)

        var transMaximumTiltAboutY = CATransform3DIdentity
        transMaximumTiltAboutY.m34 = 1.0 / 500.0
        transMaximumTiltAboutY = CATransform3DRotate(transMaximumTiltAboutY, tiltAngle, 0, 1, 0)

        xTilt.minimumRelativeValue = transMinimumTiltAboutY
        xTilt.maximumRelativeValue = transMaximumTiltAboutY

        let yTilt = UIInterpolatingMotionEffect(keyPath: "layer.transform", type: .tiltAlongVerticalAxis)
        var transMinimumTiltAboutX = CATransform3DIdentity
        transMinimumTiltAboutX.m34 = 1.0 / 500.0
        transMinimumTiltAboutX = CATransform3DRotate(transMinimumTiltAboutX, tiltAngle * -1.0, 1, 0, 0)

        var transMaximumTiltAboutX = CATransform3DIdentity
        transMaximumTiltAboutX.m34 = 1.0 / 500.0
        transMaximumTiltAboutX = CATransform3DRotate(transMaximumTiltAboutX, tiltAngle, 1, 0, 0)

        yTilt.minimumRelativeValue = transMinimumTiltAboutX
        yTilt.maximumRelativeValue = transMaximumTiltAboutX

        self.motionEffects = [xShift, yShift, xTilt, yTilt]
    }
}
I have added a little pop to the focus handling in the UICollectionViewCell subclass. Note the struct wrapper for the static variable:
override func didUpdateFocus(in context: UIFocusUpdateContext, with coordinator: UIFocusAnimationCoordinator) {
    struct Wrapper {
        static let s_atvMotionEffect = UIAppleTVMotionEffectGroup()
    }
    coordinator.addCoordinatedAnimations({
        var scale: CGFloat = 0.0
        if self.isFocused {
            self.addMotionEffect(Wrapper.s_atvMotionEffect)
            scale = 1.2
        } else {
            self.removeMotionEffect(Wrapper.s_atvMotionEffect)
            scale = 1.0
        }
        let transform = CGAffineTransform(scaleX: scale, y: scale)
        self.layer.setAffineTransform(transform)
    }, completion: nil)
}

How can I restrict the scrolling of a UIImageView to the bounds of the UIImage like in iPhoto?

Here is an image in iPhoto:
Here is it zoomed in to the top left corner in iPhoto:
Here is the same image in my app:
Here it is zoomed in to the top left corner in my app:
How can I lose the excess grey space surrounding my image and restrict the scrolling to the bounds of the UIImage like iPhoto?
Thanks
You could use a library that does that. Try MWPhotoBrowser.
So, aside from using a 3rd-party library, I solved this problem by adapting the answer from "iOS. How do I restrict UIScrollview scrolling to a limited extent?" into the following methods, which I hope are self-explanatory:
- (CGRect)methodThatGetsImageSizeOnScreen
{
    float frameHeight;
    float frameWidth;
    float frameXOrigin;
    float frameYOrigin;
    float threshold;
    BOOL thisImageTouchesLeftAndRight;
    UIInterfaceOrientation thisOrientation = self.interfaceOrientation;
    if (UIInterfaceOrientationIsLandscape(thisOrientation)) {
        threshold = 748.0 / 1024.0;
        if ((self.imageToPresent.size.height == self.imageToPresent.size.width) || ((self.imageToPresent.size.height / self.imageToPresent.size.width) > threshold)) {
            thisImageTouchesLeftAndRight = NO;
            frameWidth = (748 / self.imageToPresent.size.height) * self.imageToPresent.size.width;
            frameHeight = 748;
            frameXOrigin = (1024 - frameWidth) / 2;
            frameYOrigin = 0;
        }
        else {
            thisImageTouchesLeftAndRight = YES;
            frameWidth = 1024;
            frameHeight = (1024 / self.imageToPresent.size.width) * self.imageToPresent.size.height;
            frameXOrigin = 0;
            frameYOrigin = (748 - frameHeight) / 2;
        }
    }
    else {
        threshold = 768.0 / 1004.0;
        if ((self.imageToPresent.size.height == self.imageToPresent.size.width) || ((self.imageToPresent.size.width / self.imageToPresent.size.height) > threshold)) {
            thisImageTouchesLeftAndRight = YES;
            frameWidth = 768;
            frameHeight = (768 / self.imageToPresent.size.width) * self.imageToPresent.size.height;
            frameXOrigin = 0;
            frameYOrigin = (1004 - frameHeight) / 2;
        }
        else {
            thisImageTouchesLeftAndRight = NO;
            frameWidth = (1004 / self.imageToPresent.size.height) * self.imageToPresent.size.width;
            frameHeight = 1004;
            frameXOrigin = (768 - frameWidth) / 2;
            frameYOrigin = 0;
        }
    }
    CGRect theRect = CGRectMake(frameXOrigin, frameYOrigin, frameWidth, frameHeight);
    return theRect;
}
#pragma mark - UIScrollViewDelegate

- (void)scrollViewDidScroll:(UIScrollView *)scroll
{
    UIInterfaceOrientation thisOrientation = self.interfaceOrientation;
    float largeDimension;
    float smallDimension;
    if (UIInterfaceOrientationIsLandscape(thisOrientation)) {
        largeDimension = 1024;
        smallDimension = 748;
    }
    else {
        largeDimension = 1004;
        smallDimension = 768;
    }
    CGPoint offset = scroll.contentOffset;
    CGRect results = [self methodThatGetsImageSizeOnScreen];
    float frameHeight = results.size.height;
    float frameYOrigin = results.origin.y;
    float frameWidth = results.size.width;
    float frameXOrigin = results.origin.x;
    // So, we start the limiting of a landscape image in portrait (in the y direction) when we exceed the following criteria:
    if ((frameHeight * self.scrollView.zoomScale) > largeDimension) {
        if (offset.y < self.scrollView.zoomScale * frameYOrigin) offset.y = self.scrollView.zoomScale * frameYOrigin;
        if (offset.y > ((self.scrollView.zoomScale * frameYOrigin) + (frameHeight * self.scrollView.zoomScale) - largeDimension)) offset.y = ((self.scrollView.zoomScale * frameYOrigin) + (frameHeight * self.scrollView.zoomScale) - largeDimension);
    }
    if ((frameWidth * self.scrollView.zoomScale) > largeDimension) {
        if (offset.x < self.scrollView.zoomScale * frameXOrigin) offset.x = self.scrollView.zoomScale * frameXOrigin;
        if (offset.x > ((self.scrollView.zoomScale * frameXOrigin) + (frameWidth * self.scrollView.zoomScale) - largeDimension)) offset.x = ((self.scrollView.zoomScale * frameXOrigin) + (frameWidth * self.scrollView.zoomScale) - largeDimension);
    }
    // Set offset to adjusted value
    scroll.contentOffset = offset;
    // Remember you may want your minimum zoomScale set in viewDidLoad or viewWillAppear
}
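A related, commonly used alternative (a sketch, not part of the answer above): if the scroll view's contentSize is exactly the image view's frame, you can skip the manual contentOffset clamping and instead adjust contentInset whenever the zoom changes, which both centers a small image and restricts scrolling to the image bounds:

func scrollViewDidZoom(_ scrollView: UIScrollView) {
    // Center content smaller than the bounds; larger content scrolls
    // only to its own edges, so no grey border ever shows.
    let insetX = max((scrollView.bounds.width - scrollView.contentSize.width) / 2, 0)
    let insetY = max((scrollView.bounds.height - scrollView.contentSize.height) / 2, 0)
    scrollView.contentInset = UIEdgeInsets(top: insetY, left: insetX, bottom: insetY, right: insetX)
}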

schedule update issue

This might sound pretty straightforward. I've created a method and I've called it as below in the init method.
[self createNewSpr:ccp(s.width * 0.25,s.height-200)];
[self createNewSpr:ccp(s.width * 0.50,s.height-200)];
[self createNewSpr:ccp(s.width * 0.75,s.height-200)];
[self scheduleUpdate];
I've defined a for loop in my update method that imposes a gravity higher than that of the world on the sprites. Only the last call is affected by the new gravity; the first and second act under the world gravity. I am not sure what is wrong, but I suspect the scheduleUpdate. Please help.
Edit: the update method:
-(void) update:(ccTime)dt
{
    int32 velocityIterations = 8;
    int32 positionIterations = 1;
    world->Step(dt, velocityIterations, positionIterations);
    for (b2Body* b = world->GetBodyList(); b; b = b->GetNext())
    {
        if (b == sprite)
        {
            b->ApplyForce(b2Vec2(0.0, 20 * b->GetMass()), b->GetWorldCenter());
        }
    }
}
The createNewSpr method:
-(void) createNewSpr:(CGPoint)pos {
    //CGSize s = [CCDirector sharedDirector].winSize;
    b2Vec2 startPos = [self toMeters:pos];
    CGFloat linkHeight = 0.24;
    CGFloat linkWidth = 0.1;

    b2BodyDef bodyDef;
    bodyDef.type = b2_dynamicBody;
    bodyDef.position = startPos;

    b2FixtureDef fixtureDef;
    fixtureDef.density = 0.1;

    b2PolygonShape polygonShape;
    polygonShape.SetAsBox(linkWidth, linkHeight);
    fixtureDef.shape = &polygonShape;

    //first
    b2Body* link = world->CreateBody(&bodyDef);
    link->CreateFixture(&fixtureDef);

    PhysicsSprite* segmentSprite = [PhysicsSprite spriteWithFile:@"sg.png"];
    [self addChild:segmentSprite];
    [segmentSprite setPhysicsBody:link];

    b2RevoluteJointDef revoluteJointDef;
    revoluteJointDef.localAnchorA.Set(0, linkHeight);
    revoluteJointDef.localAnchorB.Set(0, -linkHeight);

    for (int i = 0; i < 10; i++) {
        b2Body* newLink = world->CreateBody(&bodyDef);
        newLink->CreateFixture(&fixtureDef);

        PhysicsSprite* segmentSprite = [PhysicsSprite spriteWithFile:@"sg.png"];
        [self addChild:segmentSprite];
        [segmentSprite setPhysicsBody:link];

        revoluteJointDef.bodyA = link;
        revoluteJointDef.bodyB = newLink;
        world->CreateJoint(&revoluteJointDef);

        link = newLink; //next iteration
    }

    PhysicsSprite* circleBodySprite = [PhysicsSprite spriteWithFile:@"cb.png"];
    [self addChild:circleBodySprite z:1];

    b2CircleShape circleShape;
    circleShape.m_radius = circleBodySprite.contentSize.width / 2 / PTM_RATIO;
    fixtureDef.shape = &circleShape;

    b2Body* chainBase = world->CreateBody(&bodyDef);
    chainBase->CreateFixture(&fixtureDef);
    [circleBodySprite setPhysicsBody:chainBase];

    sprite = chainBase;

    revoluteJointDef.bodyA = link;
    revoluteJointDef.bodyB = chainBase;
    revoluteJointDef.localAnchorA.Set(0, linkWidth);
    revoluteJointDef.localAnchorB.Set(0, linkWidth);
    world->CreateJoint(&revoluteJointDef);
}
The problem is with the createNewSpr method: you assign sprite to a single body, so after calling the method three times, sprite refers only to the third chain's base body. When you compare against it in the update method, the extra force is applied to that third object alone. Keep a reference to each base body (in an array, for example) if you want the force applied to all three.
Hope this helps. :)

cocos2d and chipmunk rotate object after collision

Hi,
I'm pretty new to both frameworks, but maybe someone can point me in the right direction: basically, I try to bounce a ball off a shape (works fine), but it would be great if the ball rotated, too.
Here is my (copy & paste) code:
// BallLayer.m
#import "BallLayer.h"

void updateShape(void* ptr, void* unused) {
    cpShape* shape = (cpShape*)ptr;
    Sprite* sprite = shape->data;
    if (sprite) {
        cpBody* body = shape->body;
        [sprite setPosition:cpv(body->p.x, body->p.y)];
    }
}

@implementation BallLayer

-(void)tick:(ccTime)dt {
    cpSpaceStep(space, 1.0f/60.0f);
    cpSpaceHashEach(space->activeShapes, &updateShape, nil);
}

-(void)setupChipmunk {
    cpInitChipmunk();
    space = cpSpaceNew();
    space->gravity = cpv(0, -2000);
    space->elasticIterations = 1;
    [self schedule:@selector(tick:) interval:1.0f/60.0f];

    cpBody* ballBody = cpBodyNew(200.0, cpMomentForCircle(100.0, 10, 10, cpvzero));
    ballBody->p = cpv(150, 400);
    cpSpaceAddBody(space, ballBody);

    cpShape* ballShape = cpCircleShapeNew(ballBody, 20.0, cpvzero);
    ballShape->e = 0.8;
    ballShape->u = 0.8;
    ballShape->data = ballSprite;
    ballShape->collision_type = 1;
    cpSpaceAddShape(space, ballShape);

    cpBody* floorBody = cpBodyNew(INFINITY, INFINITY);
    floorBody->p = cpv(0, 0);

    cpShape* floorShape = cpSegmentShapeNew(floorBody, cpv(0, 0), cpv(320, 160), 0);
    floorShape->e = 0.5;
    floorShape->u = 0.1;
    floorShape->collision_type = 0;
    cpSpaceAddStaticShape(space, floorShape);

    floorShape = cpSegmentShapeNew(floorBody, cpv(0, 200), cpv(320, 0), 0);
    cpSpaceAddStaticShape(space, floorShape);
}

-(id)init {
    self = [super init];
    if (nil != self) {
        ballSprite = [Sprite spriteWithFile:@"ball2.png"];
        [ballSprite setPosition:CGPointMake(150, 400)];
        [self add:ballSprite];
        [self setupChipmunk];
    }
    return self;
}
@end
Please help me out.
Well, just as I decided on posting it, I found the solution :)
void updateShape(void* ptr, void* unused)
{
    cpShape* shape = (cpShape*)ptr;
    Sprite* sprite = shape->data;
    if (sprite) {
        cpBody* body = shape->body;
        [sprite setPosition:cpv(body->p.x, body->p.y)];
        // Apply the body's angle (radians, negated) to the sprite's rotation (degrees)
        [sprite setRotation:(float)CC_RADIANS_TO_DEGREES(-body->a)];
    }
}

UIColor comparison

Given a UIColor, I need to determine if it is "light" or "dark". If I could access the hex value of the color, I could just check if it was greater than or less than a certain threshold hex number, but there appears to be no way to do that. Is there? Or is there another way I could check the brightness value of a UIColor?
You could install this category, which extends UIColor with HSV/HSB support, and then compare [aUIColor brightness].
Edit:
I found the same code in a GitHub-hosted project and made a gist of it: https://gist.github.com/1252197
#import "UIColor-HSVAdditions.h"
#implementation UIColor (UIColor_HSVAdditions)
+(struct hsv_color)HSVfromRGB:(struct rgb_color)rgb
{
struct hsv_color hsv;
CGFloat rgb_min, rgb_max;
rgb_min = MIN3(rgb.r, rgb.g, rgb.b);
rgb_max = MAX3(rgb.r, rgb.g, rgb.b);
hsv.val = rgb_max;
if (hsv.val == 0) {
hsv.hue = hsv.sat = 0;
return hsv;
}
rgb.r /= hsv.val;
rgb.g /= hsv.val;
rgb.b /= hsv.val;
rgb_min = MIN3(rgb.r, rgb.g, rgb.b);
rgb_max = MAX3(rgb.r, rgb.g, rgb.b);
hsv.sat = rgb_max - rgb_min;
if (hsv.sat == 0) {
hsv.hue = 0;
return hsv;
}
if (rgb_max == rgb.r) {
hsv.hue = 0.0 + 60.0*(rgb.g - rgb.b);
if (hsv.hue < 0.0) {
hsv.hue += 360.0;
}
} else if (rgb_max == rgb.g) {
hsv.hue = 120.0 + 60.0*(rgb.b - rgb.r);
} else /* rgb_max == rgb.b */ {
hsv.hue = 240.0 + 60.0*(rgb.r - rgb.g);
}
return hsv;
}
-(CGFloat)hue
{
struct hsv_color hsv;
struct rgb_color rgb;
rgb.r = [self red];
rgb.g = [self green];
rgb.b = [self blue];
hsv = [UIColor HSVfromRGB: rgb];
return (hsv.hue / 360.0);
}
-(CGFloat)saturation
{
struct hsv_color hsv;
struct rgb_color rgb;
rgb.r = [self red];
rgb.g = [self green];
rgb.b = [self blue];
hsv = [UIColor HSVfromRGB: rgb];
return hsv.sat;
}
-(CGFloat)brightness
{
struct hsv_color hsv;
struct rgb_color rgb;
rgb.r = [self red];
rgb.g = [self green];
rgb.b = [self blue];
hsv = [UIColor HSVfromRGB: rgb];
return hsv.val;
}
-(CGFloat)value
{
return [self brightness];
}
#end
[UIColor CGColor] will get you a CGColorRef; from there you can call CGColorGetComponents to get the individual components. Getting the "brightness" value depends on your definition of brightness, but averaging the components (in the case of an RGB color space) might be a good start.
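A sketch of that idea in Swift, using the common perceived-luminance weights instead of a plain average (the 0.5 threshold is an arbitrary cut-off):

import UIKit

extension UIColor {
    /// True for "light" colors; assumes the color is convertible to RGB.
    var isLight: Bool {
        var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 0
        guard getRed(&r, green: &g, blue: &b, alpha: &a) else { return false }
        // Perceived luminance, ITU-R BT.601 weights
        return (0.299 * r + 0.587 * g + 0.114 * b) > 0.5
    }
}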
UIColor (and CGColorRef) are generally described in RGB values. If you want to determine light or dark, you'll probably want to convert these values to something like Hue/Saturation/Brightness. But there are no built in functions like you are looking for.
Here is a guide (with code provided) on extending UIColor using a category, with methods such as hexStringFromColor:. It should be what you're looking for: UIColor expansion. Note: I did not write this blog or code.
A proposed algorithm to calculate color/brightness difference (based on a W3C paper): http://maestric.com/doc/color_brightness_difference_calculator