I'm starting to work with OpenGL in iOS. I have always learned to draw stuff in OpenGL using glBegin() and glEnd() so this is kind of new to me.
I am trying to draw a simple triangle. I can draw a white triangle nicely, I even can draw an entire colored triangle using glColor. But whenever I try to assign a color to each vertex using this code below, I get a EXC_BAD_ACCESS when drawing the array. I am using iOS 4.3 simulator for this. What am I doing wrong?
- (void) render:(CADisplayLink*)displayLink {
glClearColor(0, 104.0/255.0, 55.0/255.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
GLfloat vertices [] = {0,0,0, 0,100,0, 100,0,0};
GLfloat colours [] = {1.0,1.0,1.0, 1.0,1.0,1.0, 1.0,1.0,1.0};
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glColorPointer(3, GL_FLOAT, 0, colours);
glDrawArrays(GL_TRIANGLES, 0, 3); <-- CRASHES HERE
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
[self.context presentRenderbuffer:GL_RENDERBUFFER];
}
The line glColorPointer(3, GL_FLOAT, 0, colours) generates the GL_INVALID_VALUE error (you can see that doing po glGetError right after executing that line, it will print 1281).
The reason is that OpenGL ES doesn't support 3 color components, the documentation states:
GL_INVALID_VALUE is generated if size is not 4.
You code will be ok if you change the number of color components to 4 by adding alpha.
Your OpenGL code looks correct so far.
Does the call to glDrawArrays cause bad access, or happens bad access within it?
I just can imagine, that the glDrawArrays func pointer is not initialized,
vertex arrays should be available though.
You may call this function after glEnableClientState(GL_COLOR_ARRAY);
as a test to reset any other stale array pointers, which could cause bad access:
///#brief Emulation of call glClientAttribDefaultEXT(GL_CLIENT_VERTEX_ARRAY_BIT) according to GL_EXT_direct_state_access.
static void ClientAttribDefaultVertexArray(void) {
int i;
GLint max;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableClientState(GL_EDGE_FLAG_ARRAY);
glEdgeFlagPointer(0, 0);
glDisableClientState(GL_INDEX_ARRAY);
glIndexPointer(GL_FLOAT, 0, 0);
glDisableClientState(GL_SECONDARY_COLOR_ARRAY);
glSecondaryColorPointer(4, GL_FLOAT, 0, 0);
glDisableClientState(GL_FOG_COORD_ARRAY);
glFogCoordPointer(GL_FLOAT, 0, 0);
glGetIntegerv(GL_MAX_TEXTURE_COORDS, &max);
for (i = 0; i < max; ++i) {
glClientActiveTextureARB(GL_TEXTURE0 + i);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(4, GL_FLOAT, 0, 0);
}
glDisableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_FLOAT, 0, 0);
glDisableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, 0, 0);
glDisableClientState(GL_VERTEX_ARRAY);
glVertexPointer(4, GL_FLOAT, 0, 0);
glDisableClientState(GL_WEIGHT_ARRAY_ARB);
glWeightPointerARB(0, GL_FLOAT, 0, 0);
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &max);
for (i = 0; i < max; ++i) {
glDisableVertexAttribArrayARB(i);
glVertexAttribPointerARB(i, 4, GL_FLOAT, GL_FALSE, 0, 0);
}
glClientActiveTextureARB(GL_TEXTURE0);
}
Additionally, you can push and pop the vertex array state on the client attrib stack:
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
///vertex array init and usage
glPopClientAttrib();
Related
So I've been moving from a legacy profile to Core Profile for the last couple of days. I'd already moved much of my functionality to use VBOs and shaders, so I thought it wouldn't take that much work.
However, I can't get my new core profile contexts to draw anything at all using glDrawElements. My application manipulates textures in an app wide background openglContext, the GUI shows various stages of that using OpenGL views that share contexts with the background context.
Each texture object builds it's own VBOs for texture coords and colours as required, leaving me only to provide new vertex VBOs for displaying in views. The number of vertices and their drawing is standardised so I can share Index buffers.
This is my Pixel format shared between all contexts:
+ (NSOpenGLPixelFormat *) defaultPixelFormat
{
NSOpenGLPixelFormatAttribute attrs[] =
{
kCGLPFAOpenGLProfile, kCGLOGLPVersion_3_2_Core,
NSOpenGLPFADoubleBuffer,
NSOpenGLPFABackingStore,
NSOpenGLPFAAllowOfflineRenderers,
NSOpenGLPFAStencilSize, 8,
NSOpenGLPFAColorSize, 32,
NSOpenGLPFADepthSize, 24,
0
};
NSOpenGLPixelFormat* pixFmt = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs];
return pixFmt;
}
This is a short example of how I setup my VAOs without using a texture, I just want to draw something!
- (void) genTestVao
{
// Generate buffers first to simulate app environment
GLfloat verts[] = {
0.0, 0.0, 0.0, 0.0,
100.0, 0.0, 0.0, 0.0,
0.0, 100.0, 0.0, 0.0,
100.0, 100.0, 0.0, 0.0
};
GLfloat colors[] = {
1.0, 1.0, 1.0, 0.0,
1.0, 1.0, 1.0, 0.0,
1.0, 1.0, 1.0, 0.0,
1.0, 1.0, 1.0, 0.0
};
GLushort indices[] = {0, 1, 2, 3};
if (_testVBuffer) {
glDeleteBuffers(1, &_testVBuffer);
}
if (_testCBuffer) {
glDeleteBuffers(1, &_testCBuffer);
}
if (_testIBuffer) {
glDeleteBuffers(1, &_testIBuffer);
}
glGenBuffers(1, &_testVBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _testVBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_DYNAMIC_DRAW);
glGenBuffers(1, &_testCBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _testCBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(colors), colors, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// vert and colors buffers done
glGenBuffers(1, &_testIBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _testIBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// Index buffer done
// Generate VAO with pre stored buffers
if (_testVAO) {
glDeleteVertexArrays(1, &_testVAO);
}
glGenVertexArrays(1, &_testVAO);
glBindVertexArray(_testVAO);
// Vertex
glBindBuffer(GL_ARRAY_BUFFER, _testVBuffer);
glEnableVertexAttribArray(kSCGLVertexAttribPosition);
glVertexAttribPointer(kSCGLVertexAttribPosition, 4, GL_FLOAT, GL_FALSE, sizeof(GL_FLOAT) * 4, 0);
// Colors
glBindBuffer(GL_ARRAY_BUFFER, _testCBuffer);
glEnableVertexAttribArray(kSCGLColorAttribPosition);
glVertexAttribPointer(kSCGLColorAttribPosition, 4, GL_FLOAT, GL_FALSE, sizeof(GL_FLOAT) * 4, 0);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
Setting up a view matrix:
_mvpMatrix = GLKMatrix4Multiply(
GLKMatrix4MakeOrtho(0.0, self.bounds.size.width, 0.0, self.bounds.size.height, -1.0, 1.0),
GLKMatrix4Identity);
The drawing code:
glUseProgram(self.testShader.shaderProgram);
glUniformMatrix4fv(self.testShader.mvpMatrixLocation, 1, GL_FALSE, self.mvpMatrix.m);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _testIBuffer);
glDrawElements(GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_SHORT, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glBindTexture(GL_TEXTURE_RECTANGLE, 0);
glUseProgram(0);
The vertex Shader:
#version 150
in vec4 position;
in vec4 color;
uniform mat4 mvpMatrix;
out vec4 vertex_color;
void main()
{
// perform standard transform on vertex
gl_Position = position * mvpMatrix;
vertex_color = color;
}
The Fragment Shader:
#version 150
in vec4 vertex_color;
out vec4 colourout;
void main()
{
colourout = vertex_color;
}
And finaly the code that links shader and vertices and binds the attribute locations:
- (BOOL) createProgramObjectWithVertexShader:(GLuint) vertShader withFragShader:(GLuint) fragShader
{
_shaderProgram = glCreateProgram();
glAttachShader(_shaderProgram, vertShader);
glBindAttribLocation(_shaderProgram, kSCGLVertexAttribPosition, "position");
GLenum error = glGetError();
if (error != GL_NO_ERROR) {
NSLog(#"Error generated getting position!");
if (error == GL_INVALID_VALUE) {
NSLog(#"Invalid value");
} else if (error == GL_INVALID_OPERATION) {
NSLog(#"Invalid operation");
} else {
NSLog(#"unexpected error");
}
}
glBindAttribLocation(_shaderProgram, kSCGLColorAttribPosition, "color");
error = glGetError();
if (error != GL_NO_ERROR) {
NSLog(#"Error generated getting color!");
if (error == GL_INVALID_VALUE) {
NSLog(#"Invalid value");
} else if (error == GL_INVALID_OPERATION) {
NSLog(#"Invalid operation");
} else {
NSLog(#"unexpected error");
}
}
//glBindAttribLocation(_shaderProgram, kSCGLNormalAttribPosition, "normal");
//glBindAttribLocation(_shaderProgram, kSCGLTexCoordPosition, "texcoord");
error = glGetError();
if (error != GL_NO_ERROR) {
NSLog(#"Error generated getting texcoord!");
if (error == GL_INVALID_VALUE) {
NSLog(#"Invalid value");
} else if (error == GL_INVALID_OPERATION) {
NSLog(#"Invalid operation");
} else {
NSLog(#"unexpected error");
}
}
glAttachShader(_shaderProgram, fragShader);
glLinkProgram(_shaderProgram);
glDeleteShader(vertShader);
glDeleteShader(fragShader);
GLint result = GL_FALSE;
GLint infoLogLength = 0;
glGetProgramiv(_shaderProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
if (infoLogLength > 0) {
char errMsg[infoLogLength];
glGetProgramInfoLog(_shaderProgram, infoLogLength, &infoLogLength, errMsg);
NSString *msg = [NSString stringWithUTF8String:errMsg];
NSLog(#"Self = %#", self);
NSLog(#"Validate program failed with %#", msg);
if (![msg hasPrefix:#"WARNING:"]) {
NSLog(#"Fatal");
glDeleteProgram(_shaderProgram);
return NO;
}
}
if (![self getUniformLocations]) {
NSLog(#"Failed getting uniform variables for %#", self.shaderName);
glDeleteProgram(_shaderProgram);
return NO;
}
return YES;
}
I'm sure it's something simple, but I just can't see what it is and it's driving me crazy. The opengl view is setup correctly, if I clear it with colours, it shows correctly, it just won't draw my elements....
Part of the reason I'm moving to core profile is to share code with an iOS app, except for some simple changes, most of my opengl code is es compatible.
EDIT 1:
I created a rough and ready XCode project that shows the basics on GitHub. The app delegate holds a base shared openglContext and loads the test shader. The openGLView is based on a shared context from the App delegate:
EDIT 2:
I updated the project with a couple of corrections, now something draws, but it's not what I expect. It's a single colour where I've used multiple, and it's in the top right when I expect it to be in the bottom left.
An apple guy posted the answer to my problems on the apple devforums.
The reason I couldn't get anything to draw was the order of the matrix and position multiplication matters in the vertex shader. So the line:
gl_Position = position * mvpMatrix;
Should be:
gl_Position = mvpMatrix * position;
EDIT: Removed second part of answer as per Reto's comment.
I'm desperatly trying to draw a filled square with Cocos2D and I can't manage to find an example on how to do it :
Here is my draw method. I succeeded in drawing a square but I can't manage to fill it !
I've read that I need to use a OpenGL method called glDrawArrays with a parameter GL_TRIANGLE_FAN in order to draw a filled square and that's what I tried.
-(void) draw
{
// Disable textures - we want to draw with plaine colors
ccGLEnableVertexAttribs( kCCVertexAttribFlag_Position | kCCVertexAttribFlag_Color );
float l_fRedComponent = 0;
float l_fGreenComponent = 0;
float l_fBlueComponent = 0;
float l_fAlphaComponent = 0;
[mpColor getRed:&l_fRedComponent green:&l_fGreenComponent blue:&l_fBlueComponent alpha:&l_fAlphaComponent];
ccDrawColor4F(l_fRedComponent, l_fGreenComponent, l_fBlueComponent, l_fAlphaComponent);
glLineWidth(10);
CGPoint l_bottomLeft, l_bottomRight, l_topLeft, l_topRight;
l_bottomLeft.x = miPosX - miWidth / 2.0f;
l_bottomLeft.y = miPosY - miHeight / 2.0f;
l_bottomRight.x = miPosX + miWidth / 2.0f;
l_bottomRight.y = miPosY - miHeight / 2.0f;
l_topRight.x = miPosX + miWidth / 2.0f;
l_topRight.y = miPosY + miHeight / 2.0f;
l_topLeft.x = miPosX - miWidth / 2.0f;
l_topLeft.y = miPosY + miHeight / 2.0f;
CGPoint vertices[] = { l_bottomLeft, l_bottomRight, l_topRight, l_topLeft, l_bottomLeft };
int l_arraySize = sizeof(vertices) / sizeof(CGPoint) ;
// My old way of doing this, it draws a square, but not filled.
//ccDrawPoly( vertices, l_arraySize, NO);
// Deprecated method :(
//glVertexPointer(2, GL_FLOAT, 0, vertices);
// I've found something related to this method to replace the deprecated one, but can't understand this method !
glVertexAttribPointer(kCCVertexAttrib_Position, 3, GL_FLOAT, GL_FALSE, 0, vertices);
glDrawArrays(GL_TRIANGLE_FAN, 0, l_arraySize);
}
I've found some examples with the old version of Cocos2D (1.0) but since it's been upgraded to version 2.0 "lately" all the examples I find give me compilation errors !
Could anyone enlight my path here please ?
I didn't know today is "Reinvent the Wheel" day. :)
ccDrawSolidRect(CGPoint origin, CGPoint destination, ccColor4F color);
If you were to go all crazy and wanted to draw filled polygons, there's also:
ccDrawSolidPoly(const CGPoint *poli, NSUInteger numberOfPoints, ccColor4F color);
The "solid" methods are new in Cocos2D 2.x.
You can simply create CCLayerColor instance with needed content size and use it as filled square. In other case you have to triangulate your polygon (it will have two triangles in case of square) and draw it using OpenGL.
---EDIT
Didn't test this code, find it with google, but it seems to work fine.
http://www.deluge.co/?q=cocos-2d-custom-filled-polygon
Trying to modify this OpenGL and GLKit tutorial at raywenderlich.com, I am trying to render a cube from a wavefront obj file, without the per vertex color information and with surface normals. But the thing that is rendered looks nothing like a cube.
For parsing an obj file I have a method (createDrawable) that goes through the obj and saves the info into a struct (Drawable) that contains four things: vertex buffer, index buffer, number of faces in the object and the transform matrix of the object.
(Here are the header, .m file and the .obj file.)
- (Drawable)createDrawable: (NSString *)objFileName {
......
......
// Parsed obj and put info in vertexData and indices arrays.
Drawable _drawable;
_drawable.numFaces = numFaces;
glGenBuffers(1, &_drawable.vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _drawable.vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexData) * numFaces * 8, vertexData, GL_STATIC_DRAW);
glGenBuffers(1, &_drawable.indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _drawable.indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices) * numFaces * 3, indices, GL_STATIC_DRAW);
_drawable.matrix = GLKMatrix4Identity;
_drawable.matrix = GLKMatrix4Translate(_drawable.matrix, 0.0f, 0.0f, 10.0f);
return _drawable;
}
For rendering I am using another method (renderDrawable) that binds an object's buffers, sets pointers to them and then renders using glDrawElements(..).
- (void) renderDrawable: (Drawable)object {
glBindBuffer(GL_ARRAY_BUFFER, object.vertexBuffer);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid *) 0);
glEnableVertexAttribArray(GLKVertexAttribTexCoord1);
glVertexAttribPointer(GLKVertexAttribTexCoord1, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid *) 3);
glEnableVertexAttribArray(GLKVertexAttribNormal);
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid *) 5);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, object.indexBuffer);
glDrawElements(GL_LINES, sizeof(object.indexBuffer) * 3 * object.numFaces, GL_UNSIGNED_BYTE, (const GLvoid *) object.indexBuffer);
}
I think I am doing something wrong with the buffers (part of createDrawable shown here and renderDrawable, in the .m file), but I just can't figure out what it is.
I think that the "stride" param in glVertexArrayPointer should be set to 8 * sizeof(float) for instance (size of a single vertex)
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (const GLvoid *) 5);
when stride is 0 - that means attribs are stored in a continous way... but you have interleaved attribs so you must provide some more info to OpenGL.
Another issue: what is the sizeof(vertexData) * numFaces * 8... I think is should be: sizeof(vertexData) * numFaces * 3
And for index buffer: sizeof(indices) * numFaces * 3 - I think it should be sizeof(int) * numFaces * 3
int - type for indices, but you have GL_BYTE (that means that you can have only 256 different indices!)
when rendering:
glDrawArrays - you have index buffer bound, so set the last param to NULL
I think you problem is that you pass GL_LINES to glDrawElements() instead of GL_TRIANGLES.
What I need to do is draw a vertex array that has more than 256 elements. When I have less than that many, and I use GL_UNSIGNED_BYTE in my call to glDrawElements, everything works fine. When I have more than 256 elements, it starts drawing back at the first vertex again (i.e., last element [256 - 255, whatever] connects with first [1, or 0], and further elements don't get drawn). If I use GL_UNSIGNED_SHORT instead, I get EXC_BAD_ACCESS. What gives?
int indexLim = self.animIndex;
GLushort glIndLim = (GLushort)indexLim;
Vertex localVertices[glIndLim];
GLubyte localIndices[glIndLim];
for(GLushort i=0; i < glIndLim; i++)
{
x = (float)i;
y = [[data objectAtIndex:i ] floatValue];
x = x*xScale + xOffset;
y = y*yScale + yOffset;
localVertices[i].Position[0] = x;
localVertices[i].Position[1] = y;
localVertices[i].Position[2] = z;
localVertices[i].Color[0] = r;
localVertices[i].Color[1] = g;
localVertices[i].Color[2] = b;
localVertices[i].Color[3] = a;
localIndices[i] = i;
}
// setupVBOs
GLuint vertexBuffer;
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(localVertices), localVertices, GL_STATIC_DRAW);
GLuint indexBuffer;
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(localIndices), localIndices, GL_STATIC_DRAW);
glVertexAttribPointer(_positionSlot, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glVertexAttribPointer(_colorSlot, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*) (sizeof(float) * 3));
//glDrawElements(GL_LINE_STRIP, glIndLim, GL_UNSIGNED_BYTE, 0); // Works, but only draws 256 elements
glDrawElements(GL_LINE_STRIP, glIndLim, GL_UNSIGNED_SHORT, 0); // EXC_BAD_ACCESS!!!!
Have you tried defining:
GLubyte localIndices[glIndLim];
as
GLushort localIndices[glIndLim];
?
The reasoning is that if that should represent the index for your vertex, it should admit all possible values for GLushort, otherwise the local index will always be a GLubyte.
Log your index value each time through the loop. It sounds like you're incrementing a variable beyond its maximum value and it is becoming a negative number.
I read EXT_discard_framebuffer which causes the contents of the named framebuffer attachable images to become undefined. And after discard framebuffer, the pixel value from glReadPixels are same as before discard framebuffer. why? And, with this extension, an OpenGL ES implementation how to optimize away the storing back of framebuffer contents after rendering the frame?
//
// create a texture object
//
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texWidth, texHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
//
// create a framebuffer object
//
GLuint fbo;
GLboolean check;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
check = glIsFramebuffer(fbo1);
if (!check) {
__android_log_print(ANDROID_LOG_ERROR, "debug",
"------ check Framebuffer object failed ------\n");
return EGL_FALSE;
}
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
__android_log_print(ANDROID_LOG_ERROR, "debug",
"------ fbo not set completely!------\n");
return EGL_FALSE;
}
draw_texture(fbo);
GLubyte sampledColor[4] = {0, 0, 0, 0};
int randX = 128, randY = 128;
GLenum attachments[] = { GL_COLOR_ATTACHMENT0, GL_DEPTH_ATTACHMENT };
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glReadPixels(randX, randY, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, sampledColor);
__android_log_print(ANDROID_LOG_INFO, "debug",
"[LINE: %d] coordinate(%d, %d) color is (%d, %d, %d, %d)\n",
__LINE__, randX, randY, sampledColor[0], sampledColor[1],
sampledColor[2], sampledColor[3]);
glDiscardFramebufferEXT(GL_FRAMEBUFFER, 2, attachments);
glReadPixels(randX, randY, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, sampledColor);
__android_log_print(ANDROID_LOG_INFO, "debug",
"[LINE: %d] coordinate(%d, %d) color is (%d, %d, %d, %d)\n",
__LINE__, randX, randY, sampledColor[0], sampledColor[1],
sampledColor[2], sampledColor[3]);
Job of glDiscardFramebufferEXT is to inform driver that you don't care about contents of framebuffer. What driver (or GPU) decides to do with it - it's not up to you. Driver can reset all contents to 0, or it can leave as it is, or maybe it will use this information when you will next call glClear and will perform it more efficiently (for example by allocating new memory for contents, instead of performing memset with 0 value). Don't worry about that what it will do exactly.