NSOpenGL I420 YUV buffer render on OSX - objective-c

In my application, I have to display YUV frames received from the server; the YUV image format is I420.
I have subclassed NSOpenGLView like this:
@interface RTCNSGLVideoView : NSOpenGLView <RTCVideoRenderer>
and the OpenGL view is created on the parent view something like this:
if ( bUsingOpenGL && !pVideoRTCGLView){
NSOpenGLPixelFormatAttribute attributes[] = {
NSOpenGLPFADoubleBuffer,
NSOpenGLPFADepthSize, 24,
NSOpenGLPFAOpenGLProfile,
NSOpenGLProfileVersion3_2Core,
0
};
NSRect frameRect = [pImageView frame];
NSOpenGLPixelFormat* pixelFormat =
[[NSOpenGLPixelFormat alloc] initWithAttributes:attributes];
pVideoRTCGLView = [[RTCNSGLVideoView alloc] initWithFrame:frameRect
pixelFormat:pixelFormat];
[pVideoRTCGLView setAutoresizingMask:[pImageView autoresizingMask]];
[pVideoRTCGLView setTranslatesAutoresizingMaskIntoConstraints:NO];
[pVideoRTCGLView setYUVFrameDelegate:self];
[[[self window] contentView] addSubview:pVideoRTCGLView];
[pVideoRTCGLView prepareOpenGL];
}
I am displaying the I420 frame directly on screen, and for that the vertex and fragment shader programs are like this:
#define RTC_PIXEL_FORMAT GL_RED
#define SHADER_VERSION "#version 150\n"
#define VERTEX_SHADER_IN "in"
#define VERTEX_SHADER_OUT "out"
#define FRAGMENT_SHADER_IN "in"
#define FRAGMENT_SHADER_OUT "out vec4 fragColor;\n"
#define FRAGMENT_SHADER_COLOR "fragColor"
#define FRAGMENT_SHADER_TEXTURE "texture"
// Vertex shader doesn't do anything except pass coordinates through.
static const char kVertexShaderSource[] =
SHADER_VERSION
VERTEX_SHADER_IN " vec2 position;\n"
VERTEX_SHADER_IN " vec2 texcoord;\n"
VERTEX_SHADER_OUT " vec2 v_texcoord;\n"
"void main() {\n"
" gl_Position = vec4(position.x, position.y, 0.0, 1.0);\n"
" v_texcoord = texcoord;\n"
"}\n";
// Fragment shader converts YUV values from input textures into a final RGB
// pixel. The conversion formula is from http://www.fourcc.org/fccyvrgb.php.
static const char kFragmentShaderSource[] =
SHADER_VERSION
"precision highp float;"
FRAGMENT_SHADER_IN " vec2 v_texcoord;\n"
"uniform lowp sampler2D s_textureY;\n"
"uniform lowp sampler2D s_textureU;\n"
"uniform lowp sampler2D s_textureV;\n"
FRAGMENT_SHADER_OUT
"void main() {\n"
" float y, u, v, r, g, b;\n"
" y = " FRAGMENT_SHADER_TEXTURE "(s_textureY, v_texcoord).r;\n"
" u = " FRAGMENT_SHADER_TEXTURE "(s_textureU, v_texcoord).r;\n"
" v = " FRAGMENT_SHADER_TEXTURE "(s_textureV, v_texcoord).r;\n"
" u = u - 0.5;\n"
" v = v - 0.5;\n"
" r = y + 1.403 * v;\n"
" g = y - 0.344 * u - 0.714 * v;\n"
" b = y + 1.770 * u;\n"
" " FRAGMENT_SHADER_COLOR " = vec4(r, g, b, 1.0);\n"
" }\n";
// Compiles a shader of the given |type| with GLSL source |source| and returns
// the shader handle or 0 on error.
GLuint CreateShader(GLenum type, const GLchar* source) {
GLuint shader = glCreateShader(type);
if (!shader) {
return 0;
}
glShaderSource(shader, 1, &source, NULL);
glCompileShader(shader);
GLint compileStatus = GL_FALSE;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
if (compileStatus == GL_FALSE) {
glDeleteShader(shader);
shader = 0;
}
return shader;
}
// Links a shader program with the given vertex and fragment shaders and
// returns the program handle or 0 on error.
GLuint CreateProgram(GLuint vertexShader, GLuint fragmentShader) {
if (vertexShader == 0 || fragmentShader == 0) {
return 0;
}
GLuint program = glCreateProgram();
if (!program) {
return 0;
}
glAttachShader(program, vertexShader);
glAttachShader(program, fragmentShader);
glLinkProgram(program);
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
if (linkStatus == GL_FALSE) {
glDeleteProgram(program);
program = 0;
}
return program;
}
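For context, the three samplers above read from three single-channel (RTC_PIXEL_FORMAT, i.e. GL_RED) textures, one per I420 plane. The upload path isn't shown in the question; a minimal sketch of what it typically looks like, assuming illustrative texture/uniform names and planes whose row stride equals their width, is:
// Sketch only: (re)uploads one plane into a GL_RED texture. The U and V
// planes of an I420 frame are (width/2) x (height/2).
static void UploadPlane(GLuint tex, GLenum textureUnit, const uint8_t *plane,
                        GLsizei width, GLsizei height) {
    glActiveTexture(textureUnit);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // I420 plane rows are byte-aligned, not the default 4-byte aligned.
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, RTC_PIXEL_FORMAT, width, height, 0,
                 RTC_PIXEL_FORMAT, GL_UNSIGNED_BYTE, plane);
}
// Per frame, with the view's context current (hypothetical ivars/uniform locations):
// UploadPlane(_yTexture, GL_TEXTURE0, yPlane, width, height);
// UploadPlane(_uTexture, GL_TEXTURE1, uPlane, width / 2, height / 2);
// UploadPlane(_vTexture, GL_TEXTURE2, vPlane, width / 2, height / 2);
// glUniform1i(_ySampler, 0); glUniform1i(_uSampler, 1); glUniform1i(_vSampler, 2);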
In the subclassed view, I am setting the viewport size like this:
- (void)reshape {
[super reshape];
NSRect frame = [self frame];
NSLog(#"reshaping viewport (%#) ",NSStringFromRect(frame));
CGLLockContext([[self openGLContext] CGLContextObj]);
glViewport(0, 0, frame.size.width, frame.size.height);
CGLUnlockContext([[self openGLContext] CGLContextObj]);
}
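As an aside (not necessarily related to the artifact below): [self frame] is in points, while glViewport expects backing-store pixels, so on Retina displays a variant that converts first is common. A sketch, assuming wantsBestResolutionOpenGLSurface is enabled on the view:
- (void)reshape {
    [super reshape];
    // Convert from points to actual backing pixels before sizing the viewport.
    NSRect backing = [self convertRectToBacking:[self bounds]];
    CGLLockContext([[self openGLContext] CGLContextObj]);
    glViewport(0, 0, (GLsizei)backing.size.width, (GLsizei)backing.size.height);
    CGLUnlockContext([[self openGLContext] CGLContextObj]);
}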
What I am seeing is that the bottom and right side are a bit corrupted; please refer to the image.
Please help me figure out what is going wrong.
In the image, I have drawn red rectangles at the bottom and on the right side where this unwanted content shows up.

Related

NSOpenGLView doesn't work

Hi, I have been trying to make my first OpenGL "app" in Cocoa using NSOpenGLView. I want to clear the background with a blue color and draw a red dot, but the view stays white and the red dot is never drawn. Maybe I should use this with Core Video to refresh it in a loop. The code is from the "OpenGL SuperBible" book, so I think the fault is in my Cocoa setup.
#import <Cocoa/Cocoa.h>
@interface View : NSOpenGLView
@end
//---------------------------------
#import "View.h"
#include <OpenGL/gl3.h>
@implementation View
GLuint rendering_program;
GLuint VAO;
-(void)awakeFromNib{
NSOpenGLPixelFormatAttribute pixelFormatAttributes[] =
{
NSOpenGLPFAOpenGLProfile, NSOpenGLProfileVersion3_2Core,
NSOpenGLPFAColorSize , 24 ,
NSOpenGLPFAAlphaSize , 8 ,
NSOpenGLPFADoubleBuffer ,
NSOpenGLPFAAccelerated ,
NSOpenGLPFANoRecovery ,
0
};
NSOpenGLPixelFormat *pixelFormat = [[NSOpenGLPixelFormat alloc] initWithAttributes:pixelFormatAttributes] ;
NSOpenGLContext* glc = [[NSOpenGLContext alloc]initWithFormat:pixelFormat shareContext:nil];
[self setOpenGLContext:glc];
}
-(void)prepareOpenGL{
GLuint vs;
GLuint fs;
const GLchar*vss[] = {
"#version 330 core \n"
"void main (void)\n"
"{\n"
"gl_Position = vec4(0.0,0.0,0.5,1.0);\n"
"}\n"
};
vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, vss, 0);
glCompileShader(vs);
int s;
glGetShaderiv(vs, GL_COMPILE_STATUS, &s);
if(!s)printf("ok");
const GLchar*fss[] = {
"#version 330 core \n"
"out vec4 color;"
"void main (void)\n"
"{\n"
"color = vec4(1.0,0.0,0.0,1.0);\n"
"}\n"
};
fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1,fss, 0);
glCompileShader(fs);
int k;
glGetShaderiv(fs, GL_COMPILE_STATUS, &k);
if(!k)printf("okkk");
rendering_program = glCreateProgram();
glAttachShader(rendering_program,vs );
glAttachShader(rendering_program,fs );
glLinkProgram(rendering_program);
int n;
glGetProgramiv(rendering_program, GL_LINK_STATUS, &n);
glDeleteShader(vs);
glDeleteShader(fs);
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
}
-(void)drawRect:(NSRect)dirtyRect{
glPointSize(40);
glClearColor(0, 0, 1, 1);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(rendering_program);
glDrawArrays(GL_POINTS, 0, 1);
glFlush();
}
@end
And Here is the screenshot:
Since you are using double buffering (NSOpenGLPFADoubleBuffer), you have to swap the buffers after rendering:
[[self openGLContext] flushBuffer]
Without double buffering (single buffer), glFlush() would be sufficient.
See also glFlush() vs [[self openGLContext] flushBuffer].
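A sketch of how the posted drawRect: looks with the swap added (everything else stays the same):
-(void)drawRect:(NSRect)dirtyRect{
    [[self openGLContext] makeCurrentContext];
    glClearColor(0, 0, 1, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(rendering_program);
    glPointSize(40);
    glDrawArrays(GL_POINTS, 0, 1);
    // Swap the back buffer to the screen; this replaces glFlush() for a
    // double-buffered pixel format.
    [[self openGLContext] flushBuffer];
}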

SceneKit and with GLSL - how to add shader (GLSL) to a geometry

I'm learning SceneKit while using GLSL, and I'm having a hard time understanding how to use GLSL with SceneKit, e.g., how to load GLSL shaders in SceneKit and apply them to a geometry.
Let's say we have:
SCNBox *box = [SCNBox boxWithWidth:50 height:50 length:50 chamferRadius:0];
SCNNode *bNode = [SCNNode nodeWithGeometry:box];
SCNMaterial *redMaterial = [SCNMaterial material];
redMaterial.diffuse.contents = [UIColor redColor];
redMaterial.locksAmbientWithDiffuse = YES;
box.materials = @[redMaterial];
[scene.rootNode addChildNode:bNode];
Using the Glass GLSL example code by Apple from 2006, how can one add this effect to a geometry? Does one need to bind those parameters from Glass.vert to the SceneKit geometry? Ultimately, I'm trying to achieve a glass effect and a water effect.
The glass effect has 2 files:
1. Glass.vert
varying vec3 Normal;
varying vec3 EyeDir;
varying vec4 EyePos;
varying float LightIntensity;
uniform vec3 LightPos;
void main(void)
{
gl_Position = ftransform();
Normal = normalize(gl_NormalMatrix * gl_Normal);
vec4 pos = gl_ModelViewMatrix * gl_Vertex;
EyeDir = pos.xyz;
EyePos = gl_ModelViewProjectionMatrix * gl_Vertex;
LightIntensity = max(dot(normalize(LightPos - EyeDir), Normal), 0.0);
}
and the 2nd file: Glass.frag
const vec3 Xunitvec = vec3 (1.0, 0.0, 0.0);
const vec3 Yunitvec = vec3 (0.0, 1.0, 0.0);
uniform vec3 BaseColor;
uniform float Depth;
uniform float MixRatio;
// need to scale our framebuffer - it has a fixed width/height of 2048
uniform float FrameWidth;
uniform float FrameHeight;
uniform float textureWidth;
uniform float textureHeight;
uniform sampler2D EnvMap;
uniform sampler2D RefractionMap;
varying vec3 Normal;
varying vec3 EyeDir;
varying vec4 EyePos;
varying float LightIntensity;
void main (void)
{
// Compute reflection vector
vec3 reflectDir = reflect(EyeDir, Normal);
// Compute altitude and azimuth angles
vec2 index;
index.y = dot(normalize(reflectDir), Yunitvec);
reflectDir.y = 0.0;
index.x = dot(normalize(reflectDir), Xunitvec) * 0.5;
// Translate index values into proper range
if (reflectDir.z >= 0.0)
index = (index + 1.0) * 0.5;
else
{
index.t = (index.t + 1.0) * 0.5;
index.s = (-index.s) * 0.5 + 1.0;
}
// if reflectDir.z >= 0.0, s will go from 0.25 to 0.75
// if reflectDir.z < 0.0, s will go from 0.75 to 1.25, and
// that's OK, because we've set the texture to wrap.
// Do a lookup into the environment map.
vec3 envColor = vec3 (texture2D(EnvMap, index));
// calc fresnels term. This allows a view dependant blend of reflection/refraction
float fresnel = abs(dot(normalize(EyeDir), Normal));
fresnel *= MixRatio;
fresnel = clamp(fresnel, 0.1, 0.9);
// calc refraction
vec3 refractionDir = normalize(EyeDir) - normalize(Normal);
// Scale the refraction so the z element is equal to depth
float depthVal = Depth / -refractionDir.z;
// perform the div by w
float recipW = 1.0 / EyePos.w;
vec2 eye = EyePos.xy * vec2(recipW);
// calc the refraction lookup
index.s = (eye.x + refractionDir.x * depthVal);
index.t = (eye.y + refractionDir.y * depthVal);
// scale and shift so we're in the range 0-1
index.s = index.s / 2.0 + 0.5;
index.t = index.t / 2.0 + 0.5;
// as we're looking at the framebuffer, we want it clamping at the edge of the rendered scene, not the edge of the texture,
// so we clamp before scaling to fit
float recipTextureWidth = 1.0 / textureWidth;
float recipTextureHeight = 1.0 / textureHeight;
index.s = clamp(index.s, 0.0, 1.0 - recipTextureWidth);
index.t = clamp(index.t, 0.0, 1.0 - recipTextureHeight);
// scale the texture so we just see the rendered framebuffer
index.s = index.s * FrameWidth * recipTextureWidth;
index.t = index.t * FrameHeight * recipTextureHeight;
vec3 RefractionColor = vec3 (texture2D(RefractionMap, index));
// Add lighting to base color and mix
vec3 base = LightIntensity * BaseColor;
envColor = mix(envColor, RefractionColor, fresnel);
envColor = mix(envColor, base, 0.2);
gl_FragColor = vec4 (envColor, 1.0);
}
Edit:
I got to the point of loading those shaders in SceneKit:
NSURL *vertexShaderURL = [[NSBundle mainBundle] URLForResource:@"Glass" withExtension:@"vert"];
NSURL *fragmentShaderURL = [[NSBundle mainBundle] URLForResource:@"Glass" withExtension:@"frag"];
NSString *vertexShader = [[NSString alloc] initWithContentsOfURL:vertexShaderURL
encoding:NSUTF8StringEncoding
error:NULL];
NSString *fragmentShader = [[NSString alloc] initWithContentsOfURL:fragmentShaderURL
encoding:NSUTF8StringEncoding
error:NULL];
SCNProgram *program = [SCNProgram program];
program.delegate = self;
program.vertexShader = vertexShader;
program.fragmentShader = fragmentShader;
SCNMaterial *redMaterial = [SCNMaterial material];
redMaterial.diffuse.contents = [UIColor redColor];
redMaterial.locksAmbientWithDiffuse = YES;
redMaterial.program = program;
box.materials = @[redMaterial];
Additionally, I've initialised these values in the shader files:
//frag file
BaseColor = vec3 (0.4, 0.4, 1.0)
Depth = 0.1;
MixRatio = 1;
EnvMap = 0;
RefractionMap = 1;
//vert file
LightPos = vec3 (0.0, 140.0, 0.0);
The box now appears pink, without the glass effect. If I remove the program from redMaterial, the box appears red as expected (still without the glass effect), so I'm unable to achieve the desired result. Any help is much appreciated.
Edit 2:
Xcode logs:
2016-11-21 08:08:26.758244 testGame[7837:3366037] [DYMTLInitPlatform] platform initialization successful
2016-11-21 08:08:27.196142 testGame[7837:3365880] Metal GPU Frame Capture Enabled
2016-11-21 08:08:27.196975 testGame[7837:3365880] Metal API Validation Enabled
You're seeing the fallback shader. Make sure you're creating your renderer with the specifier to prefer OpenGL, rather than Metal. For example, with SCNView:
_sceneView = [[SCNView alloc] initWithFrame:[UIScreen mainScreen].bounds
options:@{ SCNPreferredRenderingAPIKey: @(SCNRenderingAPIOpenGLES2) }];
Also, your shader might be raising an error that you're not seeing. Set the delegate property of the program to something that implements this:
- (void)program:(SCNProgram *)program handleError:(NSError *)error
{
NSLog(#"SCNProgram error %#", error);
}
and you'll get some debug info about why the shader isn't compiling.
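On the binding question itself: yes, with SCNProgram the geometry attributes, transforms, and custom uniforms have to be wired up explicitly, and the fixed-function built-ins the 2006 shader relies on (gl_Vertex, gl_NormalMatrix, ftransform()) are not available, so the shader would need to declare its own inputs. A rough sketch of that wiring, with placeholder symbol names that would have to match the rewritten GLSL:
// Map geometry sources and transforms onto shader symbols (names here are
// placeholders; they must match what the shader actually declares).
[program setSemantic:SCNGeometrySourceSemanticVertex forSymbol:@"a_position" options:nil];
[program setSemantic:SCNGeometrySourceSemanticNormal forSymbol:@"a_normal" options:nil];
[program setSemantic:SCNModelViewProjectionTransform forSymbol:@"u_mvp" options:nil];
[program setSemantic:SCNModelViewTransform forSymbol:@"u_modelView" options:nil];
// Custom uniforms such as LightPos are set from a binding block rather than
// being assigned inside the shader file.
[redMaterial handleBindingOfSymbol:@"LightPos"
                        usingBlock:^(unsigned int programID, unsigned int location,
                                     SCNNode *renderedNode, SCNRenderer *renderer) {
    glUniform3f(location, 0.0f, 140.0f, 0.0f);
}];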

Using NSImageView for video streaming

I am making a VoIP application which also has video support.
For video, I am receiving data in YUV format and decoding it with libvpx, which gives me RGB data.
To display it I am using an NSImageView whose NSImage I replace; please refer to the code below:
-(void)gotNewRGBBuffer:(void *)imageData Height:(int)height Width:(int)width Scan:(int)scan VideoId:(const char *)pId{
@autoreleasepool {
// display the image (on the UI thread)
NSBitmapImageRep *bitmap = [[NSBitmapImageRep alloc]
initWithBitmapDataPlanes:(unsigned char **)&imageData
pixelsWide:width pixelsHigh:height
bitsPerSample:8
samplesPerPixel:3 // or 4 with alpha
hasAlpha:NO
isPlanar:NO
colorSpaceName:NSDeviceRGBColorSpace
bitmapFormat:0
bytesPerRow:scan // 0 == determine automatically
bitsPerPixel:24]; // 0 == determine automatically
NSImage *image = [[NSImage alloc] initWithSize:NSMakeSize(width, height)];
[image addRepresentation:bitmap];
if ( ![NSThread isMainThread]){
[self performSelectorOnMainThread:@selector(updateImage:) withObject:image waitUntilDone:NO];
}else{
[self updateImage:image];
}
}
}
and here is the function that updates the image:
-(void)updateImage:(NSImage *)pNewLocalImage{
[self pImageView].image = pNewLocalImage;
[[self pImageView] setNeedsDisplay:YES];
NSLog(#" Updating the image ");
}
This is working, but it takes a considerable amount of memory, so my question is: is there any other way to optimize it?
Thanks for looking at this.
I have now moved to an OpenGL NSOpenGLView, which renders fast and consumes less CPU and memory.
Refer to the code:
GLEssentials
In the given example it draws PNG images, but I need to render a YUV buffer, so you need to tweak the vertex and fragment shaders like this:
- (BOOL)setupProgram {
NSAssert(!_program, @"program already set up");
GLuint vertexShader = CreateShader(GL_VERTEX_SHADER, kVertexShaderSource);
NSAssert(vertexShader, @"failed to create vertex shader");
GLuint fragmentShader =
CreateShader(GL_FRAGMENT_SHADER, kFragmentShaderSource);
NSAssert(fragmentShader, @"failed to create fragment shader");
_program = CreateProgram(vertexShader, fragmentShader);
// Shaders are created only to generate program.
if (vertexShader) {
glDeleteShader(vertexShader);
}
if (fragmentShader) {
glDeleteShader(fragmentShader);
}
if (!_program) {
return NO;
}
_position = glGetAttribLocation(_program, "position");
_texcoord = glGetAttribLocation(_program, "texcoord");
_ySampler = glGetUniformLocation(_program, "s_textureY");
_uSampler = glGetUniformLocation(_program, "s_textureU");
_vSampler = glGetUniformLocation(_program, "s_textureV");
if (_position < 0 || _texcoord < 0 || _ySampler < 0 || _uSampler < 0 ||
_vSampler < 0) {
return NO;
}
return YES;
}
Vertex and fragment shader sources:
// Vertex shader doesn't do anything except pass coordinates through.
static const char kVertexShaderSource[] =
SHADER_VERSION
VERTEX_SHADER_IN " vec2 position;\n"
VERTEX_SHADER_IN " vec2 texcoord;\n"
VERTEX_SHADER_OUT " vec2 v_texcoord;\n"
"void main() {\n"
" gl_Position = vec4(position.x, position.y, 0.0, 1.0);\n"
" v_texcoord = texcoord;\n"
"}\n";
// Fragment shader converts YUV values from input textures into a final RGB
// pixel. The conversion formula is from http://www.fourcc.org/fccyvrgb.php.
static const char kFragmentShaderSource[] =
SHADER_VERSION
"precision highp float;"
FRAGMENT_SHADER_IN " vec2 v_texcoord;\n"
"uniform lowp sampler2D s_textureY;\n"
"uniform lowp sampler2D s_textureU;\n"
"uniform lowp sampler2D s_textureV;\n"
FRAGMENT_SHADER_OUT
"void main() {\n"
" float y, u, v, r, g, b;\n"
" y = " FRAGMENT_SHADER_TEXTURE "(s_textureY, v_texcoord).r;\n"
" u = " FRAGMENT_SHADER_TEXTURE "(s_textureU, v_texcoord).r;\n"
" v = " FRAGMENT_SHADER_TEXTURE "(s_textureV, v_texcoord).r;\n"
" u = u - 0.5;\n"
" v = v - 0.5;\n"
" r = y + 1.403 * v;\n"
" g = y - 0.344 * u - 0.714 * v;\n"
" b = y + 1.770 * u;\n"
" " FRAGMENT_SHADER_COLOR " = vec4(r, g, b, 1.0);\n"
" }\n";

Log brightness of pixels in NSImage

My goal is to find the brightness of every pixel in an image and then append it to an array. I have looked online (since this is way outside of my comfort zone, but I figured that's how I'll learn) and found several examples that get the pixel colors in Swift for iOS using UIImage. Unfortunately the same code can't be used for OS X, since NSImage doesn't seem to convert to CGImage the same way a UIImage does, so that code became next to useless for me. Next I found this site, which offers a piece of example code that finds and logs the brightness of each pixel in an image. Bingo! The only problem is that it is in Objective-C, which I only slightly understand. After a few more failed searches, I began attempting to translate the Objective-C code. This is the original code:
- (void)setupWithImage:(UIImage*)image {
UIImage * fixedImage = [image imageWithFixedOrientation];
self.workingImage = fixedImage;
self.mainImageView.image = fixedImage;
// Commence with processing!
[self logPixelsOfImage:fixedImage];
}
- (void)logPixelsOfImage:(UIImage*)image {
// 1. Get pixels of image
CGImageRef inputCGImage = [image CGImage];
NSUInteger width = CGImageGetWidth(inputCGImage);
NSUInteger height = CGImageGetHeight(inputCGImage);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
UInt32 * pixels;
pixels = (UInt32 *) calloc(height * width, sizeof(UInt32));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pixels, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast|kCGBitmapByteOrder32Big);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), inputCGImage);
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
#define Mask8(x) ( (x) & 0xFF )
#define R(x) ( Mask8(x) )
#define G(x) ( Mask8(x >> 8 ) )
#define B(x) ( Mask8(x >> 16) )
// 2. Iterate and log!
NSLog(#"Brightness of image:");
UInt32 * currentPixel = pixels;
for (NSUInteger j = 0; j < height; j++) {
for (NSUInteger i = 0; i < width; i++) {
UInt32 color = *currentPixel;
printf("%3.0f ", (R(color)+G(color)+B(color))/3.0);
currentPixel++;
}
printf("\n");
}
free(pixels);
#undef R
#undef G
#undef B
}
And this is my translated code:
func logPixelsOfImage(image: NSImage) {
var inputCGImage: CGImageRef = image
var width: UInt = CGImageGetWidth(inputCGImage)
var height: UInt = CGImageGetHeight(inputCGImage)
var bytesPerPixel: UInt = 4
var bytesPerRow: UInt = bytesPerPixel * width
var bitsPerComponent: UInt = 8
var pixels: UInt32!
//pixels = (UInt32 *) calloc(height * width, sizeof(UInt32));
var colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
var context: CGContextRef = CGBitmapContextCreate(pixels, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big) //Error: Use of unresolved identifier 'kCGImageAlphaPremultipliedLast' Second Error: Use of unresolved identifier 'kCGBitmapByteOrder32Big'
CGContextDrawImage(context, CGRectMake(0, 0, width, height), inputCGImage) // Error: 'Uint' is not convertible to 'CGFloat'
/*
#define Mask8(x) ( (x) & 0xFF )
#define R(x) ( Mask8(x) )
#define G(x) ( Mask8(x >> 8 ) )
#define B(x) ( Mask8(x >> 16) )
*/
println("Brightness of image:")
var currentPixel: UInt32 = pixels
for (var j : UInt = 0; j < height; j++) {
for ( var i : UInt = 0; i < width; i++) {
var color: UInt32 = currentPixel
//printf("%3.0f ", (R(color)+G(color)+B(color))/3.0);
currentPixel++
}
println("/n")
}
//free(pixels)
/*
#undef R
#undef G
#undeg B
*/
}
Comments contain (a) errors that the code provides or (b) code that was in the original that I don't know how to translate.
So what's my question?
Is there a better way of finding the brightness of a pixel than what is shown in the Objective-C code? If so, how would you do that? If not, how would I go about finishing up the translation of the original code?
Thanks -- A CodeIt that is extremely confused and ever so slightly desperate.
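Not a full answer to the translation, but for the NSImage-to-CGImage step that the question notes is different from UIImage, the usual macOS bridge is CGImageForProposedRect:context:hints:. Shown here in Objective-C to match the original code; the rest of logPixelsOfImage can then operate on the returned CGImageRef:
#import <Cocoa/Cocoa.h>

// Obtain a CGImage from an NSImage (the macOS counterpart of UIImage.CGImage).
// The returned ref is not owned by the caller; CGImageRetain it if it has to
// outlive the NSImage / the current autorelease pool.
static CGImageRef CGImageFromNSImage(NSImage *image) {
    NSRect rect = NSMakeRect(0, 0, image.size.width, image.size.height);
    return [image CGImageForProposedRect:&rect context:nil hints:nil];
}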

OpenGL VBO Sphere Texture loading not working

I need to draw and texture a sphere. The project I am working on will involve a considerable amount of graphics, which led me down the route of using VBOs.
I'm currently having trouble texturing the sphere: every attempt has produced a solid shaded sphere with no visible texture. I am most likely doing something silly, but after many attempts I am no closer to understanding whether the problem is the texture loading, bad U/V texture coordinates, or using the wrong shaders.
Below is a copy of all of the source code.
//
//
// Copyright (c) 2013 Andy Ward. All rights reserved.
//
#import "SphereRenderer.h"
#import "shaderUtil.h"
#import "fileUtil.h"
#import "debug.h"
//#import <GLKit/GLKit.h>
// Shaders
enum {
PROGRAM_LIGHTING,
PROGRAM_PASSTHRU,
NUM_PROGRAMS
};
enum {
UNIFORM_MVP,
UNIFORM_MODELVIEW,
UNIFORM_MODELVIEWIT,
UNIFORM_LIGHTDIR,
UNIFORM_AMBIENT,
UNIFORM_DIFFUSE,
UNIFORM_SPECULAR,
UNIFORM_SHININESS,
UNIFORM_CONSTANT_COLOR,
NUM_UNIFORMS
};
enum {
ATTRIB_VERTEX,
ATTRIB_COLOR,
ATTRIB_NORMAL,
NUM_ATTRIBS
};
typedef struct {
char *vert, *frag;
GLint uniform[NUM_UNIFORMS];
GLuint id;
} programInfo_t;
programInfo_t program[NUM_PROGRAMS] = {
{ "lighting.vsh", "color.fsh" }, // PROGRAM_LIGHTING
{ "color.vsh", "color.fsh" }, // PROGRAM_PASSTHRU
};
typedef struct
{
float x;
float y;
float z;
float nx;
float ny;
float nz;
float u;
float v;
float r;
float g;
float b;
float a;
GLbyte padding[16];
} Vertex;
static float lightDir[3] = { 0.8, 4.0, 1.0 };
static float ambient[4] = { 0.35, 0.35, 0.35, 0.35 };
static float diffuse[4] = { 1.0-0.35, 1.0-0.35, 1.0-0.35, 1.0 };
static float specular[4] = { 0.8, 0.8, 0.8, 1.0 };
static float shininess = 8;
@implementation SphereRenderer
- (id)init
{
if (self = [super init])
{
angleDelta = -0.05f;
scaleFactor = 7; //max = 1025
r = 350; //scaleFactor * 48.0f;
//maxValue = 1025 * 48.0f;
xVelocity = 1.5f;
yVelocity = 0.0f;
xPos = r*2.0f;
yPos = r*3.0f;
// normalize light dir
lightDirNormalized = GLKVector3Normalize(GLKVector3MakeWithArray(lightDir));
projectionMatrix = GLKMatrix4Identity;
[self LoadTexture];
[self generateSphereData];
[self setupShaders];
}
return self;
}
- (void)makeOrthographicForWidth:(CGFloat)width height:(CGFloat)height
{
projectionMatrix = GLKMatrix4MakeOrtho(0, width, 0, height, -50000.0f, 2000.0f);
}
-(void)generateSphereData
{
#define PI 3.141592654
#define TWOPI 6.283185308
int x;
int index = 0;
float v1x, v1y, v1z;
float v2x, v2y, v2z;
float d;
int theta, phi;
float theta0, theta1;
float phi0, phi1;
Vertex quad[4];
Vertex *sphereData = malloc( 128 * 256* 6 * sizeof( Vertex ) );
float delta = M_PI / 128;
// 128 vertical segments
for(theta = 0; theta < 128; theta++)
{
theta0 = theta*delta;
theta1 = (theta+1)*delta;
// 256 horizontal segments
for(phi = 0; phi < 256; phi++)
{
phi0 = phi*delta;
phi1 = (phi+1)*delta;
// Generate 4 points per quad
quad[0].x = r * sin(theta0) * cos(phi0);
quad[0].y = r * cos(theta0);
quad[0].z = r * sin(theta0) * sin(phi0);
quad[0].u = (float)theta / (float)128;
quad[0].v = (float)phi / (float)256;
quad[1].x = r * sin(theta0) * cos(phi1);
quad[1].y = r * cos(theta0);
quad[1].z = r * sin(theta0) * sin(phi1);
quad[1].u = (float)theta / (float)128;
quad[1].v = (float)(phi + 1) / (float)256;
quad[2].x = r * sin(theta1) * cos(phi1);
quad[2].y = r * cos(theta1);
quad[2].z = r * sin(theta1) * sin(phi1);
quad[2].u = (float)(theta + 1)/ (float)128;
quad[2].v = (float)(phi + 1) / (float)256;
quad[3].x = r * sin(theta1) * cos(phi0);
quad[3].y = r * cos(theta1);
quad[3].z = r * sin(theta1) * sin(phi0);
quad[3].u = (float)(theta + 1) / (float)128;
quad[3].v = (float)phi / (float)256;
// Generate the normal
if(theta >= 4)
{
v1x = quad[1].x - quad[0].x;
v1y = quad[1].y - quad[0].y;
v1z = quad[1].z - quad[0].z;
v2x = quad[3].x - quad[0].x;
v2y = quad[3].y - quad[0].y;
v2z = quad[3].z - quad[0].z;
}
else
{
v1x = quad[0].x - quad[3].x;
v1y = quad[0].y - quad[3].y;
v1z = quad[0].z - quad[3].z;
v2x = quad[2].x - quad[3].x;
v2y = quad[2].y - quad[3].y;
v2z = quad[2].z - quad[3].z;
}
quad[0].nx = ( v1y * v2z ) - ( v2y * v1z );
quad[0].ny = ( v1z * v2x ) - ( v2z * v1x );
quad[0].nz = ( v1x * v2y ) - ( v2x * v1y );
d = 1.0f/sqrt(quad[0].nx*quad[0].nx +
quad[0].ny*quad[0].ny +
quad[0].nz*quad[0].nz);
quad[0].nx *= d;
quad[0].ny *= d;
quad[0].nz *= d;
// Generate the color - This was for testing until I have the textures loading...
if((theta ^ phi) & 1)
{
quad[0].r = 0.0f;
quad[0].g = 0.0f;
quad[0].b = 0.0f;
quad[0].a = 0.0f;
}
else
{
quad[0].r = 0.0f;
quad[0].g = 0.0f;
quad[0].b = 0.0f;
quad[0].a = 0.0f;
}
// Replicate vertex info.
for(x = 1; x < 4; x++)
{
quad[x].nx = quad[0].nx;
quad[x].ny = quad[0].ny;
quad[x].nz = quad[0].nz;
quad[x].r = quad[0].r;
quad[x].g = quad[0].g;
quad[x].b = quad[0].b;
quad[x].a = quad[0].a;
}
// Store the vertices in two triangles. We are drawing everything as triangles.
sphereData[index++] = quad[0];
sphereData[index++] = quad[1];
sphereData[index++] = quad[2];
sphereData[index++] = quad[0];
sphereData[index++] = quad[3];
sphereData[index++] = quad[2];
}
}
// Create the VAO
glGenVertexArrays(1, &vaoId);
glBindVertexArray(vaoId);
// Create a VBO buffer
glGenBuffers(1, &vboId);
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, 128 * 256 * 6 * sizeof(Vertex), NULL, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, 128 * 256 * 6 * sizeof(Vertex), sphereData);
// set the colors - left as it's great for debugging
glEnableVertexAttribArray(ATTRIB_COLOR);
glVertexAttribPointer(ATTRIB_COLOR, 4, GL_FLOAT, GL_TRUE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,r));
// set the normals
glEnableVertexAttribArray(ATTRIB_NORMAL);
glVertexAttribPointer(ATTRIB_NORMAL, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,nx));
// set the texture
glEnableVertexAttribArray(1);
glError();
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,u));
glError();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
// set the positions
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLubyte *)(uintptr_t)offsetof(Vertex,x));
//We need to free as we used malloc
free(sphereData);
}
-(void)LoadTexture
{
NSURL *url = nil;
CGImageSourceRef src;
CGImageRef image;
CGContextRef context = nil;
CGColorSpaceRef colorSpace;
GLubyte *data;
GLsizei width, height;
// NSImage* image = [NSImage imageNamed:@"World-satellite-map.png"];
NSBundle *bundle = [NSBundle bundleWithIdentifier: @"Award.WeatherEye3D"];
NSString *bundleRoot = [bundle pathForImageResource:@"World-satellite-map.png"];
url = [NSURL fileURLWithPath: bundleRoot];
src = CGImageSourceCreateWithURL((CFURLRef)url, NULL);
if (!src) {
NSLog(@"No image");
// free(data);
return;
}
image = CGImageSourceCreateImageAtIndex(src, 0, NULL);
CFRelease(src);
width = CGImageGetWidth(image);
height = CGImageGetHeight(image);
data = (GLubyte*) calloc(width * height * 4, sizeof(GLubyte));
colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(data, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host);
CGColorSpaceRelease(colorSpace);
// Core Graphics referential is upside-down compared to OpenGL referential
// Flip the Core Graphics context here
// An alternative is to use flipped OpenGL texture coordinates when drawing textures
CGContextTranslateCTM(context, 0.0, height);
CGContextScaleCTM(context, 1.0, -1.0);
// Set the blend mode to copy before drawing since the previous contents of memory aren't used. This avoids unnecessary blending.
CGContextSetBlendMode(context, kCGBlendModeCopy);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
CGContextRelease(context);
CGImageRelease(image);
glGenTextures(1, &texture);
glGenBuffers(1, &pboId);
// Bind the texture
glBindTexture(GL_TEXTURE_2D, texture);
// Bind the PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId);
// Upload the texture data to the PBO
glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * 4 * sizeof(GLubyte), data, GL_STATIC_DRAW);
// Setup texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
// OpenGL likes the GL_BGRA + GL_UNSIGNED_INT_8_8_8_8_REV combination
// Use offset instead of pointer to indictate that we want to use data copied from a PBO
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 0);
// We can delete the application copy of the texture data now
free(data);
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
- (void)setupShaders
{
//This code has been lifted from an example.
for (int i = 0; i < NUM_PROGRAMS; i++)
{
char *vsrc = readFile(pathForResource(program[i].vert));
char *fsrc = readFile(pathForResource(program[i].frag));
GLsizei attribCt = 0;
GLchar *attribUsed[NUM_ATTRIBS];
GLint attrib[NUM_ATTRIBS];
GLchar *attribName[NUM_ATTRIBS] = {
"inVertex", "inColor", "inNormal",
};
const GLchar *uniformName[NUM_UNIFORMS] = {
"MVP", "ModelView", "ModelViewIT", "lightDir", "ambient", "diffuse", "specular", "shininess", "constantColor",
};
// auto-assign known attribs
for (int j = 0; j < NUM_ATTRIBS; j++)
{
if (strstr(vsrc, attribName[j]))
{
attrib[attribCt] = j;
attribUsed[attribCt++] = attribName[j];
}
}
glueCreateProgram(vsrc, fsrc,
attribCt, (const GLchar **)&attribUsed[0], attrib,
NUM_UNIFORMS, &uniformName[0], program[i].uniform,
&program[i].id);
free(vsrc);
free(fsrc);
// set constant uniforms
glUseProgram(program[i].id);
if (i == PROGRAM_LIGHTING)
{
// Set up lighting stuff used by the shaders
glUniform3fv(program[i].uniform[UNIFORM_LIGHTDIR], 1, lightDirNormalized.v);
glUniform4fv(program[i].uniform[UNIFORM_AMBIENT], 1, ambient);
glUniform4fv(program[i].uniform[UNIFORM_DIFFUSE], 1, diffuse);
glUniform4fv(program[i].uniform[UNIFORM_SPECULAR], 1, specular);
glUniform1f(program[i].uniform[UNIFORM_SHININESS], shininess);
}
else if (i == PROGRAM_PASSTHRU)
{
glUniform4f(program[i].uniform[UNIFORM_CONSTANT_COLOR], 0.0f,0.0f,0.0f,0.4f);
}
}
glError();
}
- (void)update
{
yPos = 400;
xPos = 375;
}
- (void)render
{
GLKMatrix4 modelViewMatrix, MVPMatrix, modelViewMatrixIT;
GLKMatrix3 normalMatrix;
glBindVertexArray(vaoId);
// glBindTexture(GL_TEXTURE, texture);
// Draw "shadow"
/* glUseProgram(program[PROGRAM_PASSTHRU].id);
glEnable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glDepthMask(GL_FALSE);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA_SATURATE, GL_ONE_MINUS_SRC_ALPHA);
*/
/*// Make the "shadow" move around a bit. This is not a real shadow projection.
GLKVector3 pos = GLKVector3Normalize(GLKVector3Make(xPos, yPos, -100.0f));
modelViewMatrix = GLKMatrix4MakeTranslation(xPos + (pos.v[0]-lightDirNormalized.v[0])*20.0,
yPos + (pos.v[1]-lightDirNormalized.v[1])*10.0,
-800.0f);
modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, -16.0f, 0.0f, 0.0f, 1.0f);
modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, angle, 0.0f, 1.0f, 0.0f);
modelViewMatrix = GLKMatrix4Scale(modelViewMatrix, 1.05f, 1.05f, 1.05f);
MVPMatrix = GLKMatrix4Multiply(projectionMatrix, modelViewMatrix);
glUniformMatrix4fv(program[PROGRAM_PASSTHRU].uniform[UNIFORM_MVP], 1, GL_FALSE, MVPMatrix.m);
//Draw the shadow arrays
glDrawArrays(GL_TRIANGLES, 0, 32*64*6);
*/
// Draw Sphere
glUseProgram(program[PROGRAM_LIGHTING].id);
glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
glDepthFunc(GL_LESS);
glDisable(GL_BLEND);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
glEnable(GL_CULL_FACE);
// ModelView
modelViewMatrix = GLKMatrix4MakeTranslation(xPos, yPos, -200.0f); // was -100
//modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, -0.01f, 0.0f, 0.0f, 0.01f);
// modelViewMatrix = GLKMatrix4Rotate(modelViewMatrix, angle, 0.0f, 1.0f, 0.0f);
glUniformMatrix4fv(program[PROGRAM_LIGHTING].uniform[UNIFORM_MODELVIEW], 1, GL_FALSE, modelViewMatrix.m);
// MVP
MVPMatrix = GLKMatrix4Multiply(projectionMatrix, modelViewMatrix);
glUniformMatrix4fv(program[PROGRAM_LIGHTING].uniform[UNIFORM_MVP], 1, GL_FALSE, MVPMatrix.m);
// ModelViewIT (normal matrix)
bool success;
modelViewMatrixIT = GLKMatrix4InvertAndTranspose(modelViewMatrix, &success);
if (success) {
normalMatrix = GLKMatrix4GetMatrix3(modelViewMatrixIT);
glUniformMatrix3fv(program[PROGRAM_LIGHTING].uniform[UNIFORM_MODELVIEWIT], 1, GL_FALSE, normalMatrix.m);
}
glDrawArrays(GL_TRIANGLE_STRIP, 0, 128*256*6 ); // Value needs changing for number of triangles...
glUseProgram(0);
glError();
}
- (void)dealloc
{
if (vboId) {
glDeleteBuffers(1, &vboId);
vboId = 0;
}
if (vaoId) {
glDeleteVertexArrays(1, &vaoId);
vaoId = 0;
}
if (vertexShader) {
glDeleteShader(vertexShader);
vertexShader = 0;
}
if (fragmentShader) {
glDeleteShader(fragmentShader);
fragmentShader = 0;
}
if (shaderProgram) {
glDeleteProgram(shaderProgram);
shaderProgram = 0;
}
[super dealloc];
}
@end
Lighting.vsh : -
#version 150
in vec4 inVertex, inColor;
in vec3 inNormal;
out vec4 color;
uniform mat4 MVP, ModelView;
uniform mat3 ModelViewIT;
uniform vec3 lightDir;
uniform vec4 ambient, diffuse, specular;
uniform float shininess;
void main()
{
// transform position to clip space
gl_Position = MVP * inVertex;
// transform position to eye space
vec3 eyePosition = vec3(ModelView * inVertex);
// transform normal to eye space (normalization skipped here: inNormal already normalized, matrix not scaled)
vec3 eyeNormal = ModelViewIT * inNormal;
// directional light ambient and diffuse contribution (lightDir alreay normalized)
float NdotL = max(dot(eyeNormal, lightDir), 0.0);
vec4 lightColor = ambient + diffuse * NdotL;
if (NdotL > 0.0)
{
// half angle
vec3 H = normalize(lightDir - normalize(eyePosition));
// specular contribution
float NdotH = max(dot(eyeNormal, H), 0.0);
lightColor += specular * pow(NdotH, shininess);
}
// apply directional light color and saturate result
// to match fixed function behavior
color = min(inColor * lightColor, 1.0);
}
color.vsh : -
#version 150
in vec4 inVertex;
out vec4 color;
uniform mat4 MVP;
uniform vec4 constantColor;
void main()
{
gl_Position = MVP * inVertex;
color = constantColor;
}
Color.fsh: -
#version 150
in vec4 color;
out vec4 fragColor;
void main()
{
fragColor = color;
}
For texture loading I always double check the modes I'm using.
For your shaders I would check the #version of the vertex and fragment shaders and make sure it plays nicely with the version of OpenGL you have installed, or whatever your video card supports. I used to do a lot with JOGL, and whenever I used #version 330 instead of #version 400 it was because my video card wasn't one of the newest models at the time and didn't support anything beyond 330. There is quite a difference between versions 150 and 400, so if you are doing anything more advanced in your GL code than what your shaders support, your textures won't load. (That is roughly where OpenGL moved away from the fixed-function pipeline to an all-programmable pipeline, so you get much more control at the cost of more work, like writing your own VBOs.)
There are also certain functions in GLSL that are different from version to version and when you are going that far back to 150, many of the newer ones won't be recognized.
Here is a good reference for shader language and what versions of OpenGL they are compatible with http://en.wikipedia.org/wiki/OpenGL_Shading_Language. I know it's just wiki, but all the version mappings on there look correct.
Also, I always had to check the direction of my normals. If they are upside down or pointing in the opposite direction from what they're supposed to (like pointing inward instead of outward), then your lighting and textures also won't work.
Here's an example of a shader I wrote a while back before I started using the newer shader versions:
v.glsl
#version 130
in vec4 vPosition;
in vec4 vColor;
in vec3 vNormal;
in vec2 vTexCoord;
out vec4 color;
out vec3 E,L,N;
out vec2 texCoord;
uniform vec4 LightPosition;
uniform mat4 Projection, Model, View;
void main() {
vec3 pos = (Model * vPosition).xyz;
E = normalize((View * vec4(0,0,0,1)).xyz-pos);
//camera eye
L = normalize(LightPosition.xyz - pos);
N = normalize(Model * vec4(vNormal, 0.0)).xyz; //set normal vector
gl_Position = Projection * View * Model * vPosition; //view mode: Projection
texCoord = vTexCoord; //output vector of texture coordinates
color = vColor; //output vector that tells you the color of each vertex
}
f.glsl
#version 130
in vec4 color;
in vec2 texCoord;
in vec3 N,L,E;
out vec4 fColor;
uniform sampler2D texture;
uniform vec4 GlobalAmbient, AmbientProduct, DiffuseProduct, SpecularProduct;
uniform vec3 LightDirection;
uniform float Shininess, CutoffAngle, LightIntensity;
void main() {
vec3 D, H;
//process the spotlight
D = normalize(LightDirection);
H = normalize(L+E); //normalize the sum of the Light and Camera (Eye) vectors
vec4 ambient = vec4(0,0,0,0);
vec4 diffuse = vec4(0,0,0,1);
vec4 specular = vec4(0,0,0,1);
vec4 color = vec4(0,0,0,0);
//spot coefficient
float Kc = LightIntensity * max(dot(D,-L)-CutoffAngle,0.0);
//ambient coefficient
ambient = (Kc*AmbientProduct) + ambient + GlobalAmbient;
//diffuse coefficient
float Kd = max(dot(L,N), 0.0);
//diffuse component
diffuse = Kc * Kd * DiffuseProduct + diffuse;
//specular coefficient
float Ks = pow(max(dot(E,H), 0.0), Shininess);
//specular component
if(dot(L,N) >= 0.0) {
specular = Kc * Ks * SpecularProduct + specular;
}
fColor = (color + ambient + diffuse + specular) * texture2D(texture, texCoord);
fColor.a = 1.0; //fully opaque
}
I'll take a look at this some more when I get home, because I love graphics. Again, this shader code talks to Java code (using the JOGL libs), so it will be done differently in Objective-C, but the ideas are all the same.
Also check the order of your gl function calls - that can make a difference in many cases.
In ObjC I'd imagine you'd hand over your pixel data like this:
- (GLuint)setupTexture:(NSString *)fileName {
CGImageRef spriteImage = [UIImage imageNamed:fileName].CGImage;
if (!spriteImage) {
NSLog(#"Failed to load image %#", fileName);
exit(1);
}
size_t width = CGImageGetWidth(spriteImage);
size_t height = CGImageGetHeight(spriteImage);
GLubyte * spriteData = (GLubyte *) calloc(width*height*4, sizeof(GLubyte));
CGContextRef spriteContext = CGBitmapContextCreate(spriteData, width, height, 8, width*4,
CGImageGetColorSpace(spriteImage), kCGImageAlphaPremultipliedLast);
CGContextDrawImage(spriteContext, CGRectMake(0, 0, width, height), spriteImage);
CGContextRelease(spriteContext);
GLuint texName;
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, spriteData);
free(spriteData);
return texName;
}
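A short usage sketch for the helper above (illustrative only): the fragment shader that samples the texture needs a sampler2D uniform and texture coordinates, as in the f.glsl example, and the returned texture gets bound to the unit that the sampler uniform points at:
// Wire the returned texture into a program whose fragment shader declares
// "uniform sampler2D texture;" (as f.glsl above does).
GLuint sphereTex = [self setupTexture:@"World-satellite-map.png"];
glUseProgram(program[PROGRAM_LIGHTING].id);
GLint samplerLoc = glGetUniformLocation(program[PROGRAM_LIGHTING].id, "texture");
glUniform1i(samplerLoc, 0);          // sample from texture unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, sphereTex);
// ...then bind the VAO and draw as in -render.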