I am trying to get my texture to show up in a basic OpenGL view (subclass of UIView), but no matter which texture I use, it shows up black. The code for my view is as follows:
@implementation SymGLView
typedef struct {
float Position[3];
float Color[4];
float TexCoord[2];
} Vertex;
const Vertex Vertices[] = {
{{1, -1, 0}, {1, 0, 0, 1}, {0, 0}},
{{1, 1, 0}, {0, 1, 0, 1}, {0, 1}},
{{-1, 1, 0}, {0, 0, 1, 1}, {1, 1}},
{{-1, -1, 0}, {0, 0, 0, 1}, {1, 0}}
};
const GLubyte Indices[] = {
0, 1, 2,
2, 3, 0
};
+ (Class)layerClass {
return [CAEAGLLayer class];
}
- (void)setupLayer {
_eaglLayer = (CAEAGLLayer*) self.layer;
_eaglLayer.opaque = YES;
}
- (void)setupContext {
EAGLRenderingAPI api = kEAGLRenderingAPIOpenGLES2;
_context = [[EAGLContext alloc] initWithAPI:api];
if (!_context) {
NSLog(#"Failed to initialize OpenGLES 2.0 context");
exit(1);
}
if (![EAGLContext setCurrentContext:_context]) {
NSLog(#"Failed to set current OpenGL context");
exit(1);
}
}
- (void)setupRenderBuffer {
glGenRenderbuffers(1, &_colorRenderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _colorRenderBuffer);
[_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:_eaglLayer];
}
- (void)setupDepthBuffer {
glGenRenderbuffers(1, &_depthRenderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _depthRenderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, self.frame.size.width, self.frame.size.height);
}
- (void)setupFrameBuffer {
GLuint framebuffer;
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _colorRenderBuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depthRenderBuffer);
}
- (GLuint)compileShader:(NSString*)shaderName withType:(GLenum)shaderType {
NSString* shaderPath = [[NSBundle mainBundle] pathForResource:shaderName ofType:@"glsl"];
NSError* error;
NSString* shaderString = [NSString stringWithContentsOfFile:shaderPath encoding:NSUTF8StringEncoding error:&error];
if (!shaderString) {
NSLog(#"Error loading shader: %#", error.localizedDescription);
exit(1);
}
GLuint shaderHandle = glCreateShader(shaderType);
const char * shaderStringUTF8 = [shaderString UTF8String];
int shaderStringLength = [shaderString length];
glShaderSource(shaderHandle, 1, &shaderStringUTF8, &shaderStringLength);
glCompileShader(shaderHandle);
GLint compileSuccess;
glGetShaderiv(shaderHandle, GL_COMPILE_STATUS, &compileSuccess);
if (compileSuccess == GL_FALSE) {
GLchar messages[256];
glGetShaderInfoLog(shaderHandle, sizeof(messages), 0, &messages[0]);
NSString *messageString = [NSString stringWithUTF8String:messages];
NSLog(#"%#", messageString);
exit(1);
}
return shaderHandle;
}
- (void)compileShaders {
GLuint vertexShader = [self compileShader:#"SimpleVertex" withType:GL_VERTEX_SHADER];
GLuint fragmentShader = [self compileShader:#"SimpleFragment" withType:GL_FRAGMENT_SHADER];
GLuint programHandle = glCreateProgram();
glAttachShader(programHandle, vertexShader);
glAttachShader(programHandle, fragmentShader);
glLinkProgram(programHandle);
GLint linkSuccess;
glGetProgramiv(programHandle, GL_LINK_STATUS, &linkSuccess);
if (linkSuccess == GL_FALSE) {
GLchar messages[256];
glGetProgramInfoLog(programHandle, sizeof(messages), 0, &messages[0]);
NSString *messageString = [NSString stringWithUTF8String:messages];
NSLog(#"%#", messageString);
exit(1);
}
glUseProgram(programHandle);
_positionSlot = glGetAttribLocation(programHandle, "Position");
_colorSlot = glGetAttribLocation(programHandle, "SourceColor");
glEnableVertexAttribArray(_positionSlot);
glEnableVertexAttribArray(_colorSlot);
_modelViewUniform = glGetUniformLocation(programHandle, "Modelview");
_texCoordSlot = glGetAttribLocation(programHandle, "TexCoordIn");
glEnableVertexAttribArray(_texCoordSlot);
_textureUniform = glGetUniformLocation(programHandle, "Texture");
}
- (void)setupVBOs {
GLuint vertexBuffer;
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
GLuint indexBuffer;
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Indices), Indices, GL_STATIC_DRAW);
}
- (void)render {
glClearColor(0, 104.0/255.0, 55.0/255.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glViewport(0, 0, self.frame.size.width, self.frame.size.height);
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE,
sizeof(Vertex), 0);
glVertexAttribPointer(_colorSlot, 4, GL_FLOAT, GL_FALSE,
sizeof(Vertex), (GLvoid*) (sizeof(float) * 3));
glVertexAttribPointer(_texCoordSlot, 2, GL_FLOAT, GL_FALSE,
sizeof(Vertex), (GLvoid*) (sizeof(float) * 7));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, _floorTexture);
glUniform1i(_textureUniform, 0);
glDrawElements(GL_TRIANGLES, sizeof(Indices)/sizeof(Indices[0]),
GL_UNSIGNED_BYTE, 0);
[_context presentRenderbuffer:GL_RENDERBUFFER];
}
- (GLuint)setupTexture:(NSString *)fileName {
CGImageRef spriteImage = [UIImage imageNamed:fileName].CGImage;
if (!spriteImage) {
NSLog(#"Failed to load image %#", fileName);
exit(1);
}
size_t width = CGImageGetWidth(spriteImage);
size_t height = CGImageGetHeight(spriteImage);
GLubyte * spriteData = (GLubyte *) calloc(width*height*4, sizeof(GLubyte));
CGContextRef spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width*4,
CGImageGetColorSpace(spriteImage), kCGImageAlphaPremultipliedLast);
CGContextDrawImage(spriteContext, CGRectMake(0, 0, width, height), spriteImage);
CGContextRelease(spriteContext);
GLuint texName;
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, spriteData);
free(spriteData);
return texName;
}
- (id)initWithFrame:(CGRect)frame
{
self = [super initWithFrame:frame];
if (self) {
[self setupLayer];
[self setupContext];
[self setupDepthBuffer];
[self setupRenderBuffer];
[self setupFrameBuffer];
[self compileShaders];
[self setupVBOs];
[self render];
}
_floorTexture = [self setupTexture:#"tile_floor.png"];
return self;
}
@end
Vertex shader:
attribute vec4 Position;
attribute vec2 TexCoordIn;
varying vec2 TexCoordOut;
void main(void) {
gl_Position = Position;
TexCoordOut = TexCoordIn;
}
Fragment shader:
varying lowp vec2 TexCoordOut;
uniform sampler2D Texture;
void main(void) {
gl_FragColor = texture2D(Texture, TexCoordOut);
}
I can create a gradient by changing the values for gl_FragColor, but I have tried several different textures and am at a loss.
This could be because your textures are not a power of two (e.g. 512x512).
Some OpenGL drivers react in weird ways to this; others just rescale the textures to the nearest power-of-two size.
You can find a quite good explanation in the OpenGL ES 2.0 "Gold Book":
In OpenGL ES 2.0, textures can have non-power-of-two (npot)
dimensions. In other words, the width and height do not need to be a
power of two. However, OpenGL ES 2.0 does have a restriction on the
wrap modes that can be used if the texture dimensions are not power of
two. That is, for npot textures, the wrap mode can only be
GL_CLAMP_TO_EDGE and the minification filter can only be GL_NEAREST
or GL_LINEAR (in other words, not mipmapped). The extension
GL_OES_texture_npot relaxes these restrictions and allows wrap modes
of GL_REPEAT and GL_MIRRORED_REPEAT and also allows npot textures to
be mipmapped with the full set of minification filters.
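In practice, if you keep the non-power-of-two image, the texture state has to look roughly like this (a sketch; texName stands for whatever texture object you generated):
// npot-safe state: clamp both axes and use a non-mipmapped filter
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
Alternatively, resize the image itself to a power-of-two size before uploading it.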
I hope this helps.
Cheers
I need to draw small circles on a frame and make them disappear after a few frames (fading out). How can I achieve this with OpenGL ES?
This is what I have so far, the code to draw the circle in a frame:
[EAGLContext setCurrentContext:context];
glBindFramebufferOES(GL_FRAMEBUFFER_OES, viewFramebuffer);
glViewport(0, 0, backingWidth, backingHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrthof(-1.0f, 1.0f, -1.5f, 1.5f, -1.0f, 1.0f);
glMatrixMode(GL_MODELVIEW);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
GLfloat vertices[720];
for (int i = 0; i < 720; i += 2) {
vertices[i] = (cos(DEGREES_TO_RADIANS(i)) * 1);
vertices[i+1] = (sin(DEGREES_TO_RADIANS(i)) * 1);
}
glVertexPointer(2, GL_FLOAT, 0, vertices);
glEnableClientState(GL_VERTEX_ARRAY);
glColor4f(1.0f, 1.0f, 0.0f, 1.0f);
glDrawArrays(GL_TRIANGLE_FAN, 0, 360);
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
[context presentRenderbuffer:GL_RENDERBUFFER_OES];
What I don't understand is how to keep track of the same circle and fade it out over successive frames.
Thanks
You will need to re-render all objects (circles) on every frame. So you need to keep around a data structure which holds the information about the circles (probably their center, radius, and opacity would be sufficient). Something like this:
typedef struct Circle {
CGPoint center;
float radius;
float opacity;
} Circle;
Circle allCircles [ kMaxNumCircles ];
Then, when you go to draw them, you iterate over the array of circles, updating their opacity and then drawing them:
for (int i = 0; i < kMaxNumCircles; i++)
{
allCircles [ i ].opacity -= someDelta;
drawCircle (allCircles [ i ]);
}
The drawCircle() function could just be what you've written above:
void drawCircle(Circle c)
{
GLfloat vertices[720];
for (int i = 0; i < 720; i += 2) {
vertices[i] = (cos(DEGREES_TO_RADIANS(i)) * 1) + c.center.x;
vertices[i+1] = (sin(DEGREES_TO_RADIANS(i)) * 1) + c.center.y;
}
glVertexPointer(2, GL_FLOAT, 0, vertices);
glEnableClientState(GL_VERTEX_ARRAY);
glColor4f(1.0f, 1.0f, 0.0f, c.opacity);
glDrawArrays(GL_TRIANGLE_FAN, 0, 360);
}
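For the opacity change to actually fade anything on screen, blending also needs to be enabled somewhere in the GL setup; a minimal sketch (standard ES 1.1 calls, placement is up to you):
// Enable alpha blending once during setup so the alpha passed to glColor4f fades the circles
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
You will probably also want to clamp the opacity at 0 and skip circles that are fully transparent.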
I'm trying to get the bytes from an RGBA texture in OpenGL ES. I think I'm trying to imitate glGetTexImage from vanilla OpenGL. As it is right now, the pixels are all zero (from the call to glClear), not the texture data I'm looking for.
This is a method in a category extending SPTexture from Sparrow Framework.
-(NSMutableData *) getPixelsFromTexture
{
GLsizei width = (GLsizei) self.width;
GLsizei height = (GLsizei) self.height;
GLint oldBind = 0;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &oldBind);
glBindTexture(GL_TEXTURE_2D, self.textureID);
GLuint fbo[1];
glGenFramebuffersOES(1, fbo);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, fbo[0]);
GLuint rbo[1];
glGenRenderbuffersOES(1, rbo);
glBindRenderbufferOES(GL_RENDERBUFFER_OES, rbo[0]);
glRenderbufferStorageOES(GL_RENDERBUFFER_OES, GL_RGBA4_OES, width, height);
glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_RENDERBUFFER_OES, rbo[0]);
GLenum status = glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES);
if (status != GL_FRAMEBUFFER_COMPLETE_OES) {
NSLog(#"Incomplete FBO");
}
// draw
static float texCoords[8] = { 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f };
static float vertexCoords[8] = { -1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f, 1.0f, -1.0f };
glMatrixMode (GL_PROJECTION);
glPushMatrix();
glLoadIdentity ();
glOrthof(0.0f, self.width, self.height, 0.0f, 0.0f, 1.0f);
glMatrixMode (GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glVertexPointer(2, GL_FLOAT, 0, vertexCoords);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glPopMatrix();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
// end draw
GLsizei count = width * height * 4;
GLubyte * pixels = (GLubyte *)malloc((sizeof(GLubyte) * count));
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindRenderbufferOES(GL_RENDERBUFFER_OES, 0);
glBindFramebufferOES(GL_FRAMEBUFFER_OES, 0);
glBindTexture(GL_TEXTURE_2D, oldBind);
glDeleteRenderbuffersOES(1, rbo);
glDeleteFramebuffersOES(1, fbo);
return [NSMutableData dataWithBytes:pixels length:count];
}
What am I doing wrong?
So, I was eventually able to solve this using a variation of the code seen here. My code is:
@interface SPRenderTexture (RenderToImage)
- (NSMutableData *)renderToPixelData;
@end
@implementation SPRenderTexture (RenderToImage)
- (NSMutableData *)renderToPixelData
{
__block NSMutableData * pixels = nil;
[self bundleDrawCalls:^()
{
float scale = [SPStage contentScaleFactor];
int width = scale * self.width;
int height = scale * self.height;
int nrOfColorComponents = 4; //RGBA
int rawImageDataLength = width * height * nrOfColorComponents;
GLenum pixelDataFormat = GL_RGBA;
GLenum pixelDataType = GL_UNSIGNED_BYTE;
GLubyte *rawImageDataBuffer = (GLubyte *) malloc(rawImageDataLength);
glReadPixels(0, 0, width, height, pixelDataFormat, pixelDataType, rawImageDataBuffer);
pixels = [[NSMutableData alloc] initWithBytes:rawImageDataBuffer length:rawImageDataLength];
}];
return [pixels autorelease];
}
@end
I hope this will be useful to other people.
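As a follow-up, if you then want a UIImage out of those raw RGBA bytes, something along these lines should work (a sketch, assuming tightly packed RGBA8888 data; renderTexture stands for your SPRenderTexture instance, and glReadPixels returns rows bottom-up, so the result may come out vertically flipped):
NSMutableData *pixels = [renderTexture renderToPixelData];
float scale = [SPStage contentScaleFactor];
int width = scale * renderTexture.width;
int height = scale * renderTexture.height;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// Wrap the pixel buffer in a bitmap context and snapshot it into a CGImage
CGContextRef ctx = CGBitmapContextCreate([pixels mutableBytes], width, height, 8, width * 4,
                                         colorSpace, kCGImageAlphaPremultipliedLast);
CGImageRef cgImage = CGBitmapContextCreateImage(ctx);
UIImage *image = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);
CGContextRelease(ctx);
CGColorSpaceRelease(colorSpace);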
I am a complete idiot at OpenGL, and I can't figure out why my code isn't working; only the clear color shows. I am trying to set up some simple rendering on iOS. Please can somebody help me?
My main scene code is:
//
// Scene.m
#import "Scene.h"
GLfloat vertices[] = {
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 1.0
};
@implementation Scene
+(Class)layerClass {
return [CAEAGLLayer class];
}
- (id)initWithFrame:(CGRect)frame
{
self = [super initWithFrame:frame];
if (self) {
CAEAGLLayer *glLayer = (CAEAGLLayer *)self.layer;
glLayer.opaque = YES;
glLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithBool:FALSE], kEAGLDrawablePropertyRetainedBacking, kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat, nil];
context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
if (!context) {
NSLog(#"Failed to create 2.0 context");
exit(1);
}
[EAGLContext setCurrentContext:context];
GLuint framebuffer, colorRenderbuffer;
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glGenRenderbuffers(1, &colorRenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
[context renderbufferStorage:GL_RENDERBUFFER fromDrawable:glLayer];
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, colorRenderbuffer);
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
GLuint program = [self loadShaders:#"Test.vp" frag:#"Test.fp"];
positionIndex = glGetAttribLocation(program, "position");
mvpIndex = glGetUniformLocation(program, "mvp");
glLinkProgram(program); //added this
GLint param;
glGetProgramiv(program, GL_LINK_STATUS, &param);
if (param == GL_FALSE) {
NSLog(#"Failed to link the program");
exit(1);
}
glUseProgram(program);
CADisplayLink *displayLink = [CADisplayLink displayLinkWithTarget:self selector:@selector(render:)];
[displayLink addToRunLoop:[NSRunLoop mainRunLoop] forMode:NSDefaultRunLoopMode];
}
return self;
}
-(void)render:(CADisplayLink *)_displayLink {
glClearColor(1.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(positionIndex);
glVertexAttribPointer(positionIndex, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid *)0);
GLfloat mvp[] = {
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
};
glUniformMatrix4fv(mvpIndex, 1, GL_FALSE, mvp);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
[context presentRenderbuffer:GL_RENDERBUFFER];
glDisableVertexAttribArray(positionIndex);
}
-(GLuint)loadShaders:(NSString *)vertShader frag:(NSString *)fragShader {
NSString *vertPath = [[NSBundle mainBundle] pathForResource:[vertShader stringByDeletingPathExtension] ofType:@"vp"];
NSString *fragPath = [[NSBundle mainBundle] pathForResource:[fragShader stringByDeletingPathExtension] ofType:@"fp"];
if (!vertPath || !fragPath) {
NSLog(#"Failed to find paths of shaders");
exit(1);
}
NSError *error;
const GLchar* vertSource = [[NSString stringWithContentsOfFile:vertPath encoding:NSUTF8StringEncoding error:&error] UTF8String];
const GLchar* fragSource = [[NSString stringWithContentsOfFile:fragPath encoding:NSUTF8StringEncoding error:&error] UTF8String];
if (!vertSource || !fragSource) {
NSLog(#"Faild to load shader sources\n%#", [error localizedDescription]);
exit(1);
}
GLuint vert, frag;
vert = glCreateShader(GL_VERTEX_SHADER);
frag = glCreateShader(GL_FRAGMENT_SHADER);
GLint length = strlen(vertSource);
glShaderSource(vert, 1, &vertSource, &length);
length = strlen(fragSource);
glShaderSource(frag, 1, &fragSource, &length);
glCompileShader(vert);
glCompileShader(frag);
GLint success;
glGetShaderiv(vert, GL_COMPILE_STATUS, &success);
if (success == GL_FALSE) {
GLchar messages[256];
glGetShaderInfoLog(vert, sizeof(messages), 0, &messages[0]);
NSLog(#"Failed to compile vertex shader\n%#", [NSString stringWithUTF8String:messages]);
exit(1);
}
glGetShaderiv(frag, GL_COMPILE_STATUS, &success);
if (success == GL_FALSE) {
GLchar messages[256];
glGetShaderInfoLog(frag, sizeof(messages), 0, &messages[0]);
NSLog(#"Failed to compile fragment shader\n%#", [NSString stringWithUTF8String:messages]);
exit(1);
}
GLuint prog = glCreateProgram();
glAttachShader(prog, vert);
glAttachShader(prog, frag);
//Removed glLinkProgram()
return prog;
}
@end
The shader code is:
attribute vec2 position;
uniform mat4 mvp;
void main(void) {
gl_Position = mvp * vec4(position.xy, -1.0, 1.0);
}
and
void main() {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
Thank you in advance
glEnableVertexAttribArray(positionIndex);
glVertexAttribPointer(positionIndex, 2, GL_FLOAT, GL_FALSE, 0, vertices);
This line is incorrect:
glDisableVertexAttribArray(positionIndex);
Don't disable it before you use it, or you'll lose it.
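A minimal sketch of that per-frame ordering, assuming the VBO from init is still bound and using the names from the question:
glEnableVertexAttribArray(positionIndex);
glVertexAttribPointer(positionIndex, 2, GL_FLOAT, GL_FALSE, 0, (void *)0); // reads from the bound VBO
// ... set your uniforms (mvp) here ...
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableVertexAttribArray(positionIndex); // only after the draw call that used it
[context presentRenderbuffer:GL_RENDERBUFFER];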
Also, since you are using a vbo for your verts, you don't need to do this:
glVertexAttribPointer(positionIndex, 2, GL_FLOAT, GL_FALSE, 0, vertices);
Simply switch it to this:
glVertexAttribPointer(positionIndex, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
You have a VBO bound to the GPU, so it will use that buffer to draw. Use glVertexAttribPointer to define the offset into the VBO at which it will find the first element for the given attribute.
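The same idea extends to interleaved data: the last argument is a byte offset into the currently bound VBO, not a client-memory pointer. A quick sketch (the struct and colorIndex here are hypothetical, just to illustrate stride and offsets; offsetof comes from <stddef.h>):
typedef struct {
    GLfloat position[2];
    GLfloat color[4];
} InterleavedVertex; // hypothetical layout
// With an interleaved VBO bound, each attribute points at its byte offset within the stride
glVertexAttribPointer(positionIndex, 2, GL_FLOAT, GL_FALSE,
                      sizeof(InterleavedVertex), (void *)offsetof(InterleavedVertex, position));
glVertexAttribPointer(colorIndex, 4, GL_FLOAT, GL_FALSE,
                      sizeof(InterleavedVertex), (void *)offsetof(InterleavedVertex, color));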
@kibab The suggestion I made for the VBO is correct.
He was receiving EXC_BAD_ACCESS because his attribute was not binding to his shader; see the comments below. As for the mvp, you are incorrect again: the mvp is meant to map points into a coordinate system running from -1.0,-1.0 to 1.0,1.0, meaning his vertices should be showing given their values.
See http://flylib.com/books/2/789/1/html/2/images/03fig02.jpg.
Also, changing the z coordinate will do nothing for you. Change your vertices to go from -1.0,-1.0 to 1.0,1.0 (a full-screen box), just so we can get something on screen.
Assuming you aren't using any Apple-provided GL classes, your glViewport isn't being set.
Try using your view's bounds size:
glViewport(0, 0, self.bounds.size.width, self.bounds.size.height);
Also, please put your verts at z = 0.0 instead of -1.0.
Also, let's try this, just to figure out what's going wrong!
Try not using a vertex buffer object and pass the data straight over like this.
Also, don't worry about disabling your attribute at the moment; let's get something on screen first.
GLfloat vertices[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
-1.0f, 1.0f, 0.0f,
1.0f, 1.0f, 0.0f };
glVertexAttribPointer(positionIndex, 3, GL_FLOAT, GL_FALSE, 0, vertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
I'm trying to add images to table cells in a grouped UITableView, but the corners of the images are not clipped. What's the best way to go about clipping these (besides clipping them in Photoshop; the table contents are dynamic)?
For example, the first image in a table would need the top left corner rounded only.
This was my solution, which could use a little refactoring:
void addRoundedRectToPath(CGContextRef context, CGRect rect, float ovalWidth, float ovalHeight, BOOL top, BOOL bottom)
{
float fw, fh;
if (ovalWidth == 0 || ovalHeight == 0) {
CGContextAddRect(context, rect);
return;
}
CGContextSaveGState(context);
CGContextTranslateCTM (context, CGRectGetMinX(rect), CGRectGetMinY(rect));
CGContextScaleCTM (context, ovalWidth, ovalHeight);
fw = CGRectGetWidth (rect) / ovalWidth;
fh = CGRectGetHeight (rect) / ovalHeight;
CGContextMoveToPoint(context, fw, fh/2);
CGContextAddArcToPoint(context, fw, fh, fw/2, fh, 0);
NSLog(#"bottom? %d", bottom);
if (top) {
CGContextAddArcToPoint(context, 0, fh, 0, fh/2, 3);
} else {
CGContextAddArcToPoint(context, 0, fh, 0, fh/2, 0);
}
if (bottom) {
CGContextAddArcToPoint(context, 0, 0, fw/2, 0, 3);
} else {
CGContextAddArcToPoint(context, 0, 0, fw/2, 0, 0);
}
CGContextAddArcToPoint(context, fw, 0, fw, fh/2, 0);
CGContextClosePath(context);
CGContextRestoreGState(context);
}
- (UIImage *)roundCornersOfImage:(UIImage *)source roundTop:(BOOL)top roundBottom:(BOOL)bottom {
int w = source.size.width;
int h = source.size.height;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(NULL, w, h, 8, 4 * w, colorSpace, kCGImageAlphaPremultipliedFirst);
CGContextBeginPath(context);
CGRect rect = CGRectMake(0, 0, w, h);
addRoundedRectToPath(context, rect, 4, 4, top, bottom);
CGContextClosePath(context);
CGContextClip(context);
CGContextDrawImage(context, CGRectMake(0, 0, w, h), source.CGImage);
CGImageRef imageMasked = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
return [UIImage imageWithCGImage:imageMasked];
}
Implement those functions, then check the indexPath in the cellForRowAtIndexPath delegate method to determine which corner to round.
if (indexPath.row == 0) {
cell.imageView.image = [self roundCornersOfImage:coverImage roundTop:YES roundBottom:NO];
} else if (indexPath.row == [indexPath length]) {
cell.imageView.image = [self roundCornersOfImage:coverImage roundTop:NO roundBottom:YES];
} else {
cell.imageView.image = coverImage;
}
If you're happy to have all four image corners rounded, then you can just do the following when creating the cell:
cell.imageView.layer.masksToBounds = YES;
cell.imageView.layer.cornerRadius = 10.0;
If you also want to inset the image from the boundary, I described a simple category on UIImage to do it here.
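The linked category isn't reproduced here, but a minimal sketch of one way to pad an image with a transparent border (hypothetical category and method names) could look like this:
@interface UIImage (Inset) // hypothetical category name
- (UIImage *)imageWithInset:(CGFloat)inset;
@end
@implementation UIImage (Inset)
- (UIImage *)imageWithInset:(CGFloat)inset {
    CGSize paddedSize = CGSizeMake(self.size.width + 2 * inset, self.size.height + 2 * inset);
    UIGraphicsBeginImageContextWithOptions(paddedSize, NO, self.scale);
    // Draw the original image centred on a larger, transparent canvas
    [self drawInRect:CGRectMake(inset, inset, self.size.width, self.size.height)];
    UIImage *padded = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return padded;
}
@end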
There isn't a built-in standard way to do this, but it's not terribly hard to do in your own code. There are examples of how to round corners on a UIImage on the web; see for example http://blog.sallarp.com/iphone-uiimage-round-corners/.
A couple of additions/changes, hope it helps someone:
1) The roundTop and roundBottom implementation changed slightly.
2) Made it a class method in a separate class so reuse is easier, rather than copy/pasting everywhere.
First, the new class details:
#import <Foundation/Foundation.h>
#import <QuartzCore/QuartzCore.h>
@interface RoundedImages : NSObject {
}
+(UIImage *)roundCornersOfImage:(UIImage *)source roundTop:(BOOL)top roundBottom:(BOOL)bottom;
@end
And its implementation:
#import "RoundedImages.h"
@implementation RoundedImages
void addRoundedRectToPath(CGContextRef context, CGRect rect, float ovalWidth, float ovalHeight, BOOL top, BOOL bottom)
{
float fw, fh;
if (ovalWidth == 0 || ovalHeight == 0) {
CGContextAddRect(context, rect);
return;
}
CGContextSaveGState(context);
CGContextTranslateCTM (context, CGRectGetMinX(rect), CGRectGetMinY(rect));
CGContextScaleCTM (context, ovalWidth, ovalHeight);
fw = CGRectGetWidth (rect) / ovalWidth;
fh = CGRectGetHeight (rect) / ovalHeight;
CGContextMoveToPoint(context, fw, fh/2);
if (top) {
CGContextAddArcToPoint(context, fw, fh, fw/2, fh, 3);
CGContextAddArcToPoint(context, 0, fh, 0, fh/2, 3);
CGContextAddArcToPoint(context, 0, 0, fw/2, 0, 0);
CGContextAddArcToPoint(context, fw, 0, fw, fh/2, 0);
} else {
CGContextAddArcToPoint(context, fw, fh, fw/2, fh, 0);
CGContextAddArcToPoint(context, 0, fh, 0, fh/2, 0);
CGContextAddArcToPoint(context, 0, 0, fw/2, 0, 3);
CGContextAddArcToPoint(context, fw, 0, fw, fh/2, 3);
}
CGContextClosePath(context);
CGContextRestoreGState(context);
}
+(UIImage *)roundCornersOfImage:(UIImage *)source roundTop:(BOOL)top roundBottom:(BOOL)bottom {
int w = source.size.width;
int h = source.size.height;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(NULL, w, h, 8, 4 * w, colorSpace, kCGImageAlphaPremultipliedFirst);
CGContextBeginPath(context);
CGRect rect = CGRectMake(0, 0, w, h);
//addRoundedRectToPath(context, rect, 4, 4, top, bottom);
addRoundedRectToPath(context, rect, 5, 5, top, bottom);
CGContextClosePath(context);
CGContextClip(context);
CGContextDrawImage(context, CGRectMake(0, 0, w, h), source.CGImage);
CGImageRef imageMasked = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
//return [UIImage imageWithCGImage:imageMasked];
UIImage *image = [UIImage imageWithCGImage:imageMasked];
CGImageRelease(imageMasked);
return image;
}
@end
To use in another class (eg, view controller):
#import "RoundedImages.h"
...and later we use it like this...
UIImageView *imageView = nil;
UIImage *img = [UIImage imageNamed:#"panel.png"];
if (indexPath.row == 0) {
imageView = [[UIImageView alloc]initWithImage:[RoundedImages roundCornersOfImage:img roundTop:YES roundBottom:NO]];
}
else if (indexPath.row == ([choices count]-1))
{
imageView = [[UIImageView alloc]initWithImage:[RoundedImages roundCornersOfImage:img roundTop:NO roundBottom:YES]];
}
else {
imageView = [[UIImageView alloc]initWithImage:img];
}
cell.backgroundColor = [UIColor clearColor];
imageView.clipsToBounds = NO;
cell.backgroundView = imageView;
cell.backgroundView = [[UIView alloc] initWithFrame:CGRectZero];
[cell.backgroundView addSubview:imageView];
[imageView release];
Note that the "choices" above was just a mutable array I was using on this page that contained the data for the tableview.
I should add that the usage snippet above is used inside your cellForRowAtIndexPath method, and "cell" is a uitableviewcell.
Anyway, works like a champ for me.
There is a built-in way if you want to just display the rounded corners. Put the image in a UIImageView and then set the cornerRadius of the UIImageView's layer. You will also need to tell the UIImageView to clip to bounds, but that will give you rounded corners.
UIImageView *myImageView = [[UIImageView alloc] initWithImage:...];
[myImageView setClipsToBounds:YES];
[[myImageView layer] setCornerRadius:5.0f];