I need to draw and texture a sphere. The project I am working on will involve a considerable amount of graphics, which led me down the route of using VBOs.
I'm currently having trouble texturing the sphere: every attempt so far has given me a solid shaded sphere with no visible texture. I am most likely doing something silly, but after many attempts I am no closer to understanding whether the problem is the texture loading, bad U/V texture coordinates, or the wrong shaders.
Below is a copy of all of the source code.
//
//
// Copyright (c) 2013 Andy Ward. All rights reserved.
//
#import "SphereRenderer.h"
#import "shaderUtil.h"
#import "fileUtil.h"
#import "debug.h"
//#import <GLKit/GLKit.h>
// Shaders
enum {
PROGRAM_LIGHTING,
PROGRAM_PASSTHRU,
NUM_PROGRAMS
};
enum {
UNIFORM_MVP,
UNIFORM_MODELVIEW,
UNIFORM_MODELVIEWIT,
UNIFORM_LIGHTDIR,
UNIFORM_AMBIENT,
UNIFORM_DIFFUSE,
UNIFORM_SPECULAR,
UNIFORM_SHININESS,
UNIFORM_CONSTANT_COLOR,
NUM_UNIFORMS
};
enum {
ATTRIB_VERTEX,
ATTRIB_COLOR,
ATTRIB_NORMAL,
NUM_ATTRIBS
};
typedef struct {
char *vert, *frag;
GLint uniform[NUM_UNIFORMS];
GLuint id;
} programInfo_t;
programInfo_t program[NUM_PROGRAMS] = {
{ "lighting.vsh", "color.fsh" }, // PROGRAM_LIGHTING
{ "color.vsh", "color.fsh" }, // PROGRAM_PASSTHRU
};
typedef struct
{
float x;
float y;
float z;
float nx;
float ny;
float nz;
float u;
float v;
float r;
float g;
float b;
float a;
GLbyte padding[16];
} Vertex;
static float lightDir[3] = { 0.8, 4.0, 1.0 };
static float ambient[4] = { 0.35, 0.35, 0.35, 0.35 };
static float diffuse[4] = { 1.0-0.35, 1.0-0.35, 1.0-0.35, 1.0 };
static float specular[4] = { 0.8, 0.8, 0.8, 1.0 };
static float shininess = 8;
@implementation SphereRenderer
- (id)init
{
if (self = [super init])
{
angleDelta = -0.05f;
scaleFactor = 7; //max = 1025
r = 350; //scaleFactor * 48.0f;
//maxValue = 1025 * 48.0f;
xVelocity = 1.5f;
yVelocity = 0.0f;
xPos = r*2.0f;
yPos = r*3.0f;
// normalize light dir
lightDirNormalized = GLKVector3Normalize(GLKVector3MakeWithArray(lightDir));
projectionMatrix = GLKMatrix4Identity;
[self LoadTexture];
[self generateSphereData];
[self setupShaders];
}
return self;
}
- (void)makeOrthographicForWidth:(CGFloat)width height:(CGFloat)height
{
projectionMatrix = GLKMatrix4MakeOrtho(0, width, 0, height, -50000.0f, 2000.0f);
}
-(void)generateSphereData
{
#define PI 3.141592654
#define TWOPI 6.283185308
int x;
int index = 0;
float v1x, v1y, v1z;
float v2x, v2y, v2z;
float d;
int theta, phi;
float theta0, theta1;
float phi0, phi1;
Vertex quad[4];
Vertex *sphereData = malloc( 128 * 256* 6 * sizeof( Vertex ) );
float delta = M_PI / 128;
// 128 vertical segments
for(theta = 0; theta < 128; theta++)
{
theta0 = theta*delta;
theta1 = (theta+1)*delta;
// 256 horizontal segments
for(phi = 0; phi < 256; phi++)
{
phi0 = phi*delta;
phi1 = (phi+1)*delta;
// Generate 4 points per quad
quad[0].x = r * sin(theta0) * cos(phi0);
quad[0].y = r * cos(theta0);
quad[0].z = r * sin(theta0) * sin(phi0);
quad[0].u = (float)theta / (float)128;
quad[0].v = (float)phi / (float)256;
quad[1].x = r * sin(theta0) * cos(phi1);
quad[1].y = r * cos(theta0);
quad[1].z = r * sin(theta0) * sin(phi1);
quad[1].u = (float)theta / (float)128;
quad[1].v = (float)(phi + 1) / (float)256;
quad[2].x = r * sin(theta1) * cos(phi1);
quad[2].y = r * cos(theta1);
quad[2].z = r * sin(theta1) * sin(phi1);
quad[2].u = (float)(theta + 1)/ (float)128;
quad[2].v = (float)(phi + 1) / (float)256;
quad[3].x = r * sin(theta1) * cos(phi0);
quad[3].y = r * cos(theta1);
quad[3].z = r * sin(theta1) * sin(phi0);
quad[3].u = (float)(theta + 1) / (float)128;
quad[3].v = (float)phi / (float)256;
// Generate the normal
if(theta >= 4)
{
v1x = quad[1].x - quad[0].x;
v1y = quad[1].y - quad[0].y;
v1z = quad[1].z - quad[0].z;
v2x = quad[3].x - quad[0].x;
v2y = quad[3].y - quad[0].y;
v2z = quad[3].z - quad[0].z;
}
else
{
v1x = quad[0].x - quad[3].x;
v1y = quad[0].y - quad[3].y;
v1z = quad[0].z - quad[3].z;
v2x = quad[2].x - quad[3].x;
v2y = quad[2].y - quad[3].y;
v2z = quad[2].z - quad[3].z;
}
quad[0].nx = ( v1y * v2z ) - ( v2y * v1z );
quad[0].ny = ( v1z * v2x ) - ( v2z * v1x );
quad[0].nz = ( v1x * v2y ) - ( v2x * v1y );
d = 1.0f/sqrt(quad[0].nx*quad[0].nx +
quad[0].ny*quad[0].ny +
quad[0].nz*quad[0].nz);
quad[0].nx *= d;
quad[0].ny *= d;
quad[0].nz *= d;
// Generate the color - This was for testing until I have the textures loading...
if((theta ^ phi) & 1)
{
quad[0].r = 0.0f;
quad[0].g = 0.0f;
quad[0].b = 0.0f;
quad[0].a = 0.0f;
}
else
{
quad[0].r = 0.0f;
quad[0].g = 0.0f;
quad[0].b = 0.0f;
quad[0].a = 0.0f;
}
// Replicate vertex info.
for(x = 1; x < 4; x++)
{
quad[x].nx = quad[0].nx;
quad[x].ny = quad[0].ny;
quad[x].nz = quad[0].nz;
quad[x].r = quad[0].r;
quad[x].g = quad[0].g;
quad[x].b = quad[0].b;
quad[x].a = quad[0].a;
}
// Store the vertices in two triangles. We are drawing everything as triangles.
sphereData[index++] = quad[0];
sphereData[index++] = quad[1];
sphereData[index++] = quad[2];
sphereData[index++] = quad[0];
sphereData[index++] = quad[3];
sphereData[index++] = quad[2];
}
}
// Create the VAO
glGenVertexArrays(1, &vaoId);
glBindVertexArray(vaoId);
// Create a VBO buffer
glGenBuffers(1, &vboId);
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, 128 * 256 * 6 * sizeof(Vertex), NULL, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, 128 * 256 * 6 * sizeof(Vertex), sphereData);
// set the colors - left as it's great for debugging
glEnableVertexAttribArray(ATTRIB_COLOR);
glVertexAttribPointer(ATTRIB_COLOR, 4, GL_FLOAT, GL_TRUE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,r));
// set the normals
glEnableVertexAttribArray(ATTRIB_NORMAL);
glVertexAttribPointer(ATTRIB_NORMAL, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,nx));
// set the texture
glEnableVertexAttribArray(1);
glError();
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,u));
glError();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
// set the positions
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,x));
//We need to free as we used malloc
free(sphereData);
}
-(void)LoadTexture
{
NSURL *url = nil;
CGImageSourceRef src;
CGImageRef image;
CGContextRef context = nil;
CGColorSpaceRef colorSpace;
GLubyte *data;
GLsizei width, height;
// NSImage* image = [NSImage imageNamed:@"World-satellite-map.png"];
NSBundle *bundle = [NSBundle bundleWithIdentifier: @"Award.WeatherEye3D"];
NSString *bundleRoot = [bundle pathForImageResource:@"World-satellite-map.png"];
url = [NSURL fileURLWithPath: bundleRoot];
src = CGImageSourceCreateWithURL((CFURLRef)url, NULL);
if (!src) {
NSLog(#"No image");
// free(data);
return;
}
image = CGImageSourceCreateImageAtIndex(src, 0, NULL);
CFRelease(src);
width = CGImageGetWidth(image);
height = CGImageGetHeight(image);
data = (GLubyte*) calloc(width * height * 4, sizeof(GLubyte));
colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(data, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host);
CGColorSpaceRelease(colorSpace);
// Core Graphics referential is upside-down compared to OpenGL referential
// Flip the Core Graphics context here
// An alternative is to use flipped OpenGL texture coordinates when drawing textures
CGContextTranslateCTM(context, 0.0, height);
CGContextScaleCTM(context, 1.0, -1.0);
// Set the blend mode to copy before drawing since the previous contents of memory aren't used. This avoids unnecessary blending.
CGContextSetBlendMode(context, kCGBlendModeCopy);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
CGContextRelease(context);
CGImageRelease(image);
glGenTextures(1, &texture);
glGenBuffers(1, &pboId);
// Bind the texture
glBindTexture(GL_TEXTURE_2D, texture);
// Bind the PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId);
// Upload the texture data to the PBO
glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * 4 * sizeof(GLubyte), data, GL_STATIC_DRAW);
// Setup texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
// OpenGL likes the GL_BGRA + GL_UNSIGNED_INT_8_8_8_8_REV combination
// Use offset instead of pointer to indicate that we want to use data copied from a PBO
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 0);
// We can delete the application copy of the texture data now
free(data);
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
- (void)setupShaders
{
//This code has been lifted from an example.
for (int i = 0; i < NUM_PROGRAMS; i++)
{
char *vsrc = readFile(pathForResource(program[i].vert));
char *fsrc = readFile(pathForResource(program[i].frag));
GLsizei attribCt = 0;
GLchar *attribUsed[NUM_ATTRIBS];
GLint attrib[NUM_ATTRIBS];
GLchar *attribName[NUM_ATTRIBS] = {
"inVertex", "inColor", "inNormal",
};
const GLchar *uniformName[NUM_UNIFORMS] = {
"MVP", "ModelView", "ModelViewIT", "lightDir", "ambient", "diffuse", "specular", "shininess", "constantColor",
};
// auto-assign known attribs
for (int j = 0; j < NUM_ATTRIBS; j++)
{
if (strstr(vsrc, attribName[j]))
{
attrib[attribCt] = j;
attribUsed[attribCt++] = attribName[j];
}
}
glueCreateProgram(vsrc, fsrc,
attribCt, (const GLchar **)&attribUsed[0], attrib,
NUM_UNIFORMS, &uniformName[0], program[i].uniform,
&program[i].id);
free(vsrc);
free(fsrc);
// set constant uniforms
glUseProgram(program[i].id);
if (i == PROGRAM_LIGHTING)
{
// Set up lighting stuff used by the shaders
glUniform3fv(program[i].uniform[UNIFORM_LIGHTDIR], 1, lightDirNormalized.v);
glUniform4fv(program[i].uniform[UNIFORM_AMBIENT], 1, ambient);
glUniform4fv(program[i].uniform[UNIFORM_DIFFUSE], 1, diffuse);
glUniform4fv(program[i].uniform[UNIFORM_SPECULAR], 1, specular);
glUniform1f(program[i].uniform[UNIFORM_SHININESS], shininess);
}
else if (i == PROGRAM_PASSTHRU)
{
glUniform4f(program[i].uniform[UNIFORM_CONSTANT_COLOR], 0.0f,0.0f,0.0f,0.4f);
}
}
glError();
}
- (void)update
{
yPos = 400;
xPos = 375;
}
- (void)render
{
GLKMatrix4 modelViewMatrix, MVPMatrix, modelViewMatrixIT;
GLKMatrix3 normalMatrix;
glBindVertexArray(vaoId);
// glBindTexture(GL_TEXTURE, texture);
// Draw "shadow"
/* glUseProgram(program[PROGRAM_PASSTHRU].id);
glEnable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glDepthMask(GL_FALSE);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA_SATURATE, GL_ONE_MINUS_SRC_ALPHA);
*/
/*// Make the "shadow" move around a bit. This is not a real shadow projection.
GLKVector3 pos = GLKVector3Normalize(GLKVector3Make(xPos, yPos, -100.0f));
modelViewMatrix = GLKMatrix4MakeTranslation(xPos + (pos.v[0]-lightDirNormalized.v[0])*20.0,
yPos + (pos.v[1]-lightDirNormalized.v[1])*10.0,
-800.0f);
modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, -16.0f, 0.0f, 0.0f, 1.0f);
modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, angle, 0.0f, 1.0f, 0.0f);
modelViewMatrix = GLKMatrix4Scale(modelViewMatrix, 1.05f, 1.05f, 1.05f);
MVPMatrix = GLKMatrix4Multiply(projectionMatrix, modelViewMatrix);
glUniformMatrix4fv(program[PROGRAM_PASSTHRU].uniform[UNIFORM_MVP], 1, GL_FALSE, MVPMatrix.m);
//Draw the shadow arrays
glDrawArrays(GL_TRIANGLES, 0, 32*64*6);
*/
// Draw Sphere
glUseProgram(program[PROGRAM_LIGHTING].id);
glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
glDepthFunc(GL_LESS);
glDisable(GL_BLEND);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
glEnable(GL_CULL_FACE);
// ModelView
modelViewMatrix = GLKMatrix4MakeTranslation(xPos, yPos, -200.0f); // was -100
//modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, -0.01f, 0.0f, 0.0f, 0.01f);
// modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, angle, 0.0f, 1.0f, 0.0f);
glUniformMatrix4fv(program[PROGRAM_LIGHTING].uniform[UNIFORM_MODELVIEW], 1, GL_FALSE, modelViewMatrix.m);
// MVP
MVPMatrix = GLKMatrix4Multiply(projectionMatrix, modelViewMatrix);
glUniformMatrix4fv(program[PROGRAM_LIGHTING].uniform[UNIFORM_MVP], 1, GL_FALSE, MVPMatrix.m);
// ModelViewIT (normal matrix)
bool success;
modelViewMatrixIT = GLKMatrix4InvertAndTranspose(modelViewMatrix, &success);
if (success) {
normalMatrix = GLKMatrix4GetMatrix3(modelViewMatrixIT);
glUniformMatrix3fv(program[PROGRAM_LIGHTING].uniform[UNIFORM_MODELVIEWIT], 1, GL_FALSE, normalMatrix.m);
}
glDrawArrays(GL_TRIANGLE_STRIP, 0, 128*256*6 ); // Value needs changing for number of triangles...
glUseProgram(0);
glError();
}
- (void)dealloc
{
if (vboId) {
glDeleteBuffers(1, &vboId);
vboId = 0;
}
if (vaoId) {
glDeleteVertexArrays(1, &vaoId);
vaoId = 0;
}
if (vertexShader) {
glDeleteShader(vertexShader);
vertexShader = 0;
}
if (fragmentShader) {
glDeleteShader(fragmentShader);
fragmentShader = 0;
}
if (shaderProgram) {
glDeleteProgram(shaderProgram);
shaderProgram = 0;
}
[super dealloc];
}
@end
lighting.vsh:
#version 150
in vec4 inVertex, inColor;
in vec3 inNormal;
out vec4 color;
uniform mat4 MVP, ModelView;
uniform mat3 ModelViewIT;
uniform vec3 lightDir;
uniform vec4 ambient, diffuse, specular;
uniform float shininess;
void main()
{
// transform position to clip space
gl_Position = MVP * inVertex;
// transform position to eye space
vec3 eyePosition = vec3(ModelView * inVertex);
// transform normal to eye space (normalization skipped here: inNormal already normalized, matrix not scaled)
vec3 eyeNormal = ModelViewIT * inNormal;
// directional light ambient and diffuse contribution (lightDir already normalized)
float NdotL = max(dot(eyeNormal, lightDir), 0.0);
vec4 lightColor = ambient + diffuse * NdotL;
if (NdotL > 0.0)
{
// half angle
vec3 H = normalize(lightDir - normalize(eyePosition));
// specular contribution
float NdotH = max(dot(eyeNormal, H), 0.0);
lightColor += specular * pow(NdotH, shininess);
}
// apply directional light color and saturate result
// to match fixed function behavior
color = min(inColor * lightColor, 1.0);
}
color.vsh:
#version 150
in vec4 inVertex;
out vec4 color;
uniform mat4 MVP;
uniform vec4 constantColor;
void main()
{
gl_Position = MVP * inVertex;
color = constantColor;
}
color.fsh:
#version 150
in vec4 color;
out vec4 fragColor;
void main()
{
fragColor = color;
}
For texture loading I always double-check the formats and modes I'm using.
For your shaders, I would check the #version of the vertex and fragment shaders and make sure it plays nicely with the version of OpenGL you have installed and whatever your video card supports. I used to do a lot with JOGL, and I had to use #version 330 instead of #version 400 because my video card wasn't one of the newest models at the time and didn't support any shader version beyond 330. There is quite a difference between versions 150 and 400, so if your GL code does anything more advanced than what your shaders can support, your textures won't show. (There was a major change in OpenGL around there: it went from the fixed-function pipeline to the fully programmable pipeline, which gives you much more control at the cost of more work, like setting up your own VBOs.)
There are also certain functions in GLSL that differ from version to version, and when you go as far back as 150, many of the newer ones won't be recognized.
Here is a good reference for the shading language versions and the OpenGL versions they are compatible with: http://en.wikipedia.org/wiki/OpenGL_Shading_Language. I know it's just a wiki, but the version mappings there look correct.
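If in doubt, you can ask the context at runtime which GL and GLSL versions it actually gives you. A minimal sketch using standard OpenGL calls (the gl3.h header is an assumption for a Mac core-profile context; adjust for your setup):

#include <stdio.h>
#include <OpenGL/gl3.h> // assumption: OS X core-profile context; use your own GL header otherwise

// Print the OpenGL and GLSL versions supported by the current context.
// Call this while a valid OpenGL context is current.
static void printGLVersions(void)
{
    const GLubyte *glVersion   = glGetString(GL_VERSION);
    const GLubyte *glslVersion = glGetString(GL_SHADING_LANGUAGE_VERSION);
    printf("GL_VERSION: %s\n", glVersion);
    printf("GL_SHADING_LANGUAGE_VERSION: %s\n", glslVersion);
}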
Also, I always had to check the direction of my normals. If they are flipped or pointing the opposite way to what they should (inward instead of outward, say), then your lighting, and with it your textured result, won't come out right either.
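For a sphere centered at the origin, like yours, there is a cheap sanity check: the outward normal at a vertex is just the vertex position normalized, so you can compare that against (or substitute it for) the cross-product normals. A minimal sketch, assuming the Vertex struct and origin-centered positions from your generateSphereData:

#include <math.h>

// Sketch only: analytic outward normal for a vertex of a sphere centered at the origin.
static void setSphereNormal(Vertex *vtx)
{
    float len = sqrtf(vtx->x * vtx->x + vtx->y * vtx->y + vtx->z * vtx->z);
    if (len > 0.0f) {
        vtx->nx = vtx->x / len;
        vtx->ny = vtx->y / len;
        vtx->nz = vtx->z / len;
    }
}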
Here's an example of a shader I wrote a while back before I started using the newer shader versions:
v.glsl
#version 130
in vec4 vPosition;
in vec4 vColor;
in vec3 vNormal;
in vec2 vTexCoord;
out vec4 color;
out vec3 E,L,N;
out vec2 texCoord;
uniform vec4 LightPosition;
uniform mat4 Projection, Model, View;
void main() {
vec3 pos = (Model * vPosition).xyz;
E = normalize((View * vec4(0,0,0,1)).xyz-pos);
//camera eye
L = normalize(LightPosition.xyz - pos);
N = normalize(Model * vec4(vNormal, 0.0)).xyz; //set normal vector
gl_Position = Projection * View * Model * vPosition; //view mode: Projection
texCoord = vTexCoord; //output vector of texture coordinates
color = vColor; //output vector that tells you the color of each vertex
}
f.glsl
#version 130
in vec4 color;
in vec2 texCoord;
in vec3 N,L,E;
out vec4 fColor;
uniform sampler2D texture;
uniform vec4 GlobalAmbient, AmbientProduct, DiffuseProduct, SpecularProduct;
uniform vec3 LightDirection;
uniform float Shininess, CutoffAngle, LightIntensity;
void main() {
vec3 D, H;
//process the spotlight
D = normalize(LightDirection);
H = normalize(L+E); //normalize the sum of the Light and Camera (Eye) vectors
vec4 ambient = vec4(0,0,0,0);
vec4 diffuse = vec4(0,0,0,1);
vec4 specular = vec4(0,0,0,1);
vec4 color = vec4(0,0,0,0);
//spot coefficient
float Kc = LightIntensity * max(dot(D,-L)-CutoffAngle,0.0);
//ambient coefficient
ambient = (Kc*AmbientProduct) + ambient + GlobalAmbient;
//diffuse coefficient
float Kd = max(dot(L,N), 0.0);
//diffuse component
diffuse = Kc * Kd * DiffuseProduct + diffuse;
//specular coefficient
float Ks = pow(max(dot(E,H), 0.0), Shininess);
//specular component
if(dot(L,N) >= 0.0) {
specular = Kc * Ks * SpecularProduct + specular;
}
fColor = (color + ambient + diffuse + specular) * texture2D(texture, texCoord);
fColor.a = 1.0; //fully opaque
}
I'll take a look at this some more when I get home, because I love graphics. Now again, this shader code talks to Java code (using the JOGL libs), so it will be done differently in Objective-C, but the ideas are all the same.
Also check the order of your gl function calls - that can make a difference in many cases.
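For example, at draw time the texture needs to be bound and the sampler uniform pointed at the right texture unit before the draw call; in your render method the glBindTexture line is commented out and I don't see a sampler uniform being set. A rough sketch of the ordering I mean (programId, vertexCount and the uTexture uniform name are placeholders, not names from your project):

// Placeholder names; only the call ordering is the point here.
GLint samplerLoc = glGetUniformLocation(programId, "uTexture");

glUseProgram(programId);                      // 1. make the program current
glActiveTexture(GL_TEXTURE0);                 // 2. select texture unit 0
glBindTexture(GL_TEXTURE_2D, texture);        // 3. bind the texture to that unit
glUniform1i(samplerLoc, 0);                   // 4. point the sampler at unit 0
glBindVertexArray(vaoId);                     // 5. bind the geometry
glDrawArrays(GL_TRIANGLES, 0, vertexCount);   // 6. then draw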
In ObjC I'd imagine you'd hand over your pixel data like this:
- (GLuint)setupTexture:(NSString *)fileName {
CGImageRef spriteImage = [UIImage imageNamed:fileName].CGImage;
if (!spriteImage) {
NSLog(#"Failed to load image %#", fileName);
exit(1);
}
size_t width = CGImageGetWidth(spriteImage);
size_t height = CGImageGetHeight(spriteImage);
GLubyte * spriteData = (GLubyte *) calloc(width*height*4, sizeof(GLubyte));
CGContextRef spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width*4,
CGImageGetColorSpace(spriteImage), kCGImageAlphaPremultipliedLast);
CGContextDrawImage(spriteContext, CGRectMake(0, 0, width, height), spriteImage);
CGContextRelease(spriteContext);
GLuint texName;
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
free(spriteData);
return texName;
}
I'm currently using this technique to get the color of a pixel in a UIImage (on iOS).
- (UIColor*) getPixelColorAtLocation:(CGPoint)point {
UIColor* color = nil;
CGImageRef inImage = self.image.CGImage;
// Create off screen bitmap context to draw the image into. Format ARGB is 4 bytes for each pixel: Alpha, Red, Green, Blue
CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
if (cgctx == NULL) { return nil; /* error */ }
size_t w = CGImageGetWidth(inImage);
size_t h = CGImageGetHeight(inImage);
CGRect rect = {{0,0},{w,h}};
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(cgctx, rect, inImage);
// Now we can get a pointer to the image data associated with the bitmap
// context.
unsigned char* data = CGBitmapContextGetData (cgctx);
if (data != NULL) {
//offset locates the pixel in the data from x,y.
//4 for 4 bytes of data per pixel, w is width of one row of data.
int offset = 4*((w*round(point.y))+round(point.x));
int alpha = data[offset];
int red = data[offset+1];
int green = data[offset+2];
int blue = data[offset+3];
NSLog(#"offset: %i colors: RGB A %i %i %i %i",offset,red,green,blue,alpha);
color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
}
// When finished, release the context
CGContextRelease(cgctx);
// Free image data memory for the context
if (data) { free(data); }
return color;
}
As illustrated here:
http://www.markj.net/iphone-uiimage-pixel-color/
it works quite well, but when working with images larger than the UIImageView it fails. I tried adding an image and changing the scaling mode to fit the view. How would I modify the code so that it can still sample the pixel color from a scaled image?
Try this for Swift 3:
func getPixelColor(image: UIImage, x: Int, y: Int, width: CGFloat) -> UIColor
{
    let pixelData = image.cgImage!.dataProvider!.data
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    let pixelInfo: Int = ((Int(width) * y) + x) * 4
    let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
    let g = CGFloat(data[pixelInfo + 1]) / CGFloat(255.0)
    let b = CGFloat(data[pixelInfo + 2]) / CGFloat(255.0)
    let a = CGFloat(data[pixelInfo + 3]) / CGFloat(255.0)
    return UIColor(red: r, green: g, blue: b, alpha: a)
}
Here's a pointer:
0x3A28213A //sorry, I couldn't resist the joke
For real now: after going through the comments on the page at markj.net, a certain James suggested making the following changes:
size_t w = CGImageGetWidth(inImage); //Written by Mark
size_t h = CGImageGetHeight(inImage); //Written by Mark
float xscale = w / self.frame.size.width;
float yscale = h / self.frame.size.height;
point.x = point.x * xscale;
point.y = point.y * yscale;
(thanks to http://www.markj.net/iphone-uiimage-pixel-color/comment-page-1/#comment-2159)
This didn't actually work for me... Not that I did much testing, and I'm not the world's greatest programmer (yet)...
My solution was to scale the UIImageView so that each pixel of the image in it was the same size as a standard CGPoint on the screen, then take the color as normal (using getPixelColorAtLocation:(CGPoint)point), and then scale the image back to the size I wanted. There's a rough sketch of that below.
Hope this helps!
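Here's roughly what that looks like in code. This is just a sketch of the idea, assuming imageView is your UIImageView, its image fills the view, and getPixelColorAtLocation: is the method from above; the method name colorAtImagePoint: is mine:

// Sketch: resize the view so one image pixel == one point, sample, then restore.
- (UIColor *)colorAtImagePoint:(CGPoint)point inImageView:(UIImageView *)imageView
{
    CGRect originalFrame = imageView.frame;
    CGSize imageSize = imageView.image.size;

    // Make the view the same size (in points) as the image.
    imageView.frame = CGRectMake(originalFrame.origin.x, originalFrame.origin.y,
                                 imageSize.width, imageSize.height);

    UIColor *color = [self getPixelColorAtLocation:point]; // sample as usual

    // Scale the view back to the size we actually want on screen.
    imageView.frame = originalFrame;
    return color;
}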
Use the UIImageView Layer:
- (UIColor*) getPixelColorAtLocation:(CGPoint)point {
UIColor* color = nil;
UIGraphicsBeginImageContext(self.frame.size);
CGContextRef cgctx = UIGraphicsGetCurrentContext();
if (cgctx == NULL) { return nil; /* error */ }
[self.layer renderInContext:cgctx];
unsigned char* data = CGBitmapContextGetData (cgctx);
/*
...
*/
UIGraphicsEndImageContext();
return color;
}
I am developing a game with cocos2d-iphone.
I want to create a circular health bar. Think of Kingdom Hearts or something.
I am able to draw circles with ccDrawCircle, but they are full circles. Basically, I need to be able to draw only up to a certain point around the circumference to represent the health properly. However, I am not really sure how to go about this. Any ideas?
I had a quick look at the code for ccDrawCircle. If I were approaching this, I'd probably start by modifying the way the for loop works (maybe by playing with coef or segs) so that it stops early; there's a rough sketch of that after the original function below.
void ccDrawCircle( CGPoint center, float r, float a, NSUInteger segs, BOOL drawLineToCenter)
{
int additionalSegment = 1;
if (drawLineToCenter)
additionalSegment++;
const float coef = 2.0f * (float)M_PI/segs;
GLfloat *vertices = calloc( sizeof(GLfloat)*2*(segs+2), 1);
if( ! vertices )
return;
for(NSUInteger i=0;i<=segs;i++)
{
float rads = i*coef;
GLfloat j = r * cosf(rads + a) + center.x;
GLfloat k = r * sinf(rads + a) + center.y;
vertices[i*2] = j * CC_CONTENT_SCALE_FACTOR();
vertices[i*2+1] =k * CC_CONTENT_SCALE_FACTOR();
}
vertices[(segs+1)*2] = center.x * CC_CONTENT_SCALE_FACTOR();
vertices[(segs+1)*2+1] = center.y * CC_CONTENT_SCALE_FACTOR();
// Default GL states: GL_TEXTURE_2D, GL_VERTEX_ARRAY, GL_COLOR_ARRAY, GL_TEXTURE_COORD_ARRAY
// Needed states: GL_VERTEX_ARRAY,
// Unneeded states: GL_TEXTURE_2D, GL_TEXTURE_COORD_ARRAY, GL_COLOR_ARRAY
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glDrawArrays(GL_LINE_STRIP, 0, (GLsizei) segs+additionalSegment);
// restore default state
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
free( vertices );
}
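Something along these lines, for example. This is an untested sketch; ccDrawPartialCircle and its fraction parameter are my own invention, not part of cocos2d:

// Untested sketch: draw only the first `fraction` (0.0 .. 1.0) of the circle,
// e.g. fraction = currentHealth / maxHealth. Adapted from ccDrawCircle above.
void ccDrawPartialCircle( CGPoint center, float r, float a, NSUInteger segs, float fraction)
{
    const float coef = 2.0f * (float)M_PI/segs;
    NSUInteger drawSegs = (NSUInteger)(segs * fraction);   // stop the loop early

    GLfloat *vertices = calloc( sizeof(GLfloat)*2*(drawSegs+1), 1);
    if( ! vertices )
        return;

    for(NSUInteger i = 0; i <= drawSegs; i++)
    {
        float rads = i*coef;
        vertices[i*2]   = (r * cosf(rads + a) + center.x) * CC_CONTENT_SCALE_FACTOR();
        vertices[i*2+1] = (r * sinf(rads + a) + center.y) * CC_CONTENT_SCALE_FACTOR();
    }

    glDisable(GL_TEXTURE_2D);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);

    glVertexPointer(2, GL_FLOAT, 0, vertices);
    glDrawArrays(GL_LINE_STRIP, 0, (GLsizei) drawSegs + 1);

    // restore default state
    glEnableClientState(GL_COLOR_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glEnable(GL_TEXTURE_2D);

    free( vertices );
}

You'd recompute fraction and redraw whenever the health changes.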
CGContextAddArc() will do the trick. An example explains everything.
void CGContextAddArc(CGContextRef c, CGFloat x, CGFloat y, CGFloat radius, CGFloat startAngle, CGFloat endAngle, int clockwise);
I'm not quite sure about the order of the parameters; you'd better check the documentation or let Xcode do the work for you.
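As a rough illustration of how it could be used for a health arc (a sketch only; the context, centre, radius and health value are placeholders):

// Sketch: stroke an arc whose sweep is proportional to health (0.0 .. 1.0).
// Assumes we are inside a Core Graphics drawing pass (e.g. drawRect:).
CGContextRef ctx = UIGraphicsGetCurrentContext();
CGFloat health = 0.75f;                               // placeholder: 75% health
CGFloat startAngle = -M_PI_2;                         // start at 12 o'clock
CGFloat endAngle = startAngle + health * 2.0f * M_PI;

CGContextSetLineWidth(ctx, 6.0f);
CGContextSetStrokeColorWithColor(ctx, [UIColor greenColor].CGColor);
CGContextAddArc(ctx, 100.0f, 100.0f, 40.0f, startAngle, endAngle, 0);
CGContextStrokePath(ctx);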