Ping-pong rendering between two FBOs fails after the first frame - Objective-C

I am trying to create two FBOs and implement a ping-pong render, but I only get the first frame to work properly. I am simulating a game of life and, after the first frame, I only get a black screen. Could you help me check it? I have spent hours on this issue.
Edit
Maybe I didn't describe it clearly. I want to use textureB as the input texture and render into textureA, then use textureA to render to the screen, and then vice versa.
Edit
I can see the first frame, which is textureB. After it goes through the fragment shader, it becomes black. At first I suspected the fragment shader, so I changed it to only invert black to white and white to black. It still becomes all black.
Set up the fbo and texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureA);
glBindTexture(GL_TEXTURE_2D, textureA);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glGenTextures(1, &textureB);
glBindTexture(GL_TEXTURE_2D, textureB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
data=(GLubyte*)malloc(256*256*4*sizeof(GLubyte));
GLubyte val;
for (int i = 0; i < 256 * 256 * 4; i+=4) {
if (rand()%10 ==1)
{ val = 0; }
else
{ val = 255; }
data[i] = data[i+1] = data[i+2] = val;
data[i+3] = 255;
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenFramebuffers(1, &fboA);
glBindFramebuffer(GL_FRAMEBUFFER, fboA);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureA, 0);
glGenFramebuffers(1, &fboB);
glBindFramebuffer(GL_FRAMEBUFFER, fboB);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureB, 0);
Render Loop
if ([context API] == kEAGLRenderingAPIOpenGLES2) {
if(counter%2==0)
{
glUseProgram(automateProg);
glBindFramebuffer(GL_FRAMEBUFFER, fboA);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureB);
glUniform1i(AUTOMATE_TEXT, 0);
glUniform1f(DU, 1.0/256);
glUniform1f(DV, 1.0/256);
// Update attribute values.
glVertexAttribPointer(ATTRIB_VERTEX_2, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX_2);
glVertexAttribPointer(ATTRIB_TEXCOORD_2, 2, GL_FLOAT, GL_FALSE, 0, texCoord);
//glEnableVertexAttribArray(ATTRIB_TEXCOORD_2);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
if (![self validateProgram:automateProg]) {
NSLog(#"Failed to validate program: %d", automateProg);
return;
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glUseProgram(0);
}
else
{
glUseProgram(automateProg);
glBindFramebuffer(GL_FRAMEBUFFER, fboB);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureA);
glUniform1i(AUTOMATE_TEXT, 0);
glUniform1f(DU, 1.0/256);
glUniform1f(DV, 1.0/256);
// Update attribute values.
glVertexAttribPointer(ATTRIB_VERTEX_2, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX_2);
glVertexAttribPointer(ATTRIB_TEXCOORD_2, 2, GL_FLOAT, GL_FALSE, 0, texCoord);
//glEnableVertexAttribArray(ATTRIB_TEXCOORD_2);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
if (![self validateProgram:automateProg]) {
NSLog(#"Failed to validate program: %d", automateProg);
return;
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glUseProgram(0);
}
[(EAGLView *)self.view setFramebuffer];
glClearColor(0.5f, 0.5f, 0.5f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
if (counter % 2 == 0) {
glUseProgram(normalProg);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureB);
glUniform1i(NORMAL_TEXT, 0);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, GL_FALSE, 0, texCoord);
glEnableVertexAttribArray(ATTRIB_TEXCOORD);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
if (![self validateProgram:normalProg]) {
NSLog(#"Failed to validate program: %d", normalProg);
return;
}
glUseProgram(0);
} else {
glUseProgram(normalProg);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureA);
glUniform1i(NORMAL_TEXT, 0);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, GL_FALSE, 0, texCoord);
glEnableVertexAttribArray(ATTRIB_TEXCOORD);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
if (![self validateProgram:normalProg]) {
NSLog(#"Failed to validate program: %d", normalProg);
return;
}
glUseProgram(0);
}
counter++;
[(EAGLView *)self.view presentFramebuffer];
Fragment Shader
precision mediump float;
varying vec2 v_texCoord;
uniform sampler2D tex; //the input texture
uniform float du; //the width of the cells
uniform float dv; //the height of the cells
void main() {
int count = 0;
vec4 C = texture2D( tex, v_texCoord );
vec4 E = texture2D( tex, vec2(v_texCoord.x + du, v_texCoord.y) );
vec4 N = texture2D( tex, vec2(v_texCoord.x, v_texCoord.y + dv) );
vec4 W = texture2D( tex, vec2(v_texCoord.x - du, v_texCoord.y) );
vec4 S = texture2D( tex, vec2(v_texCoord.x, v_texCoord.y - dv) );
vec4 NE = texture2D( tex, vec2(v_texCoord.x + du, v_texCoord.y + dv) );
vec4 NW = texture2D( tex, vec2(v_texCoord.x - du, v_texCoord.y + dv) );
vec4 SE = texture2D( tex, vec2(v_texCoord.x + du, v_texCoord.y - dv) );
vec4 SW = texture2D( tex, vec2(v_texCoord.x - du, v_texCoord.y - dv) );
if (E.r == 1.0) { count++; }
if (N.r == 1.0) { count++; }
if (W.r == 1.0) { count++; }
if (S.r == 1.0) { count++; }
if (NE.r == 1.0) { count++; }
if (NW.r == 1.0) { count++; }
if (SE.r == 1.0) { count++; }
if (SW.r == 1.0) { count++; }
if ( (count == 2 || count == 3)) {
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0); //cell lives...
} else {
gl_FragColor = vec4(0.0,0.0,0.0, 1.0); //cell dies...
}
}

Do I understand your code right, that you want to render a result to a texture in the first if-else-block and render that result to screen in the second if-else-block?
If so, then it looks like you have a mistake in how you organize your input and output to begin with.
This is what happens in your first pass (I reduced your code):
if(counter%2==0)
{
glBindFramebuffer(GL_FRAMEBUFFER, fboA); // will render to textureA
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureB); // textureB is our input
} else {
...
}
if (counter % 2 == 0) {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureB); // textureB still as input? not textureA?
} else {
...
}
...and this is what happens in the second pass:
if(counter%2==0)
{
...
} else {
glBindFramebuffer(GL_FRAMEBUFFER, fboB); // will render to textureB
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureA); // textureA as input
}
if (counter % 2 == 0) {
...
} else {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureA); // textureA as input again?
}
The reason why you see something in the first frame is that you actually render your input data, not the result of your first pass. And the reason why you get a black screen in your second pass may be that your fragment shader does not work correctly. Judging from your shader code, a mistake in accessing the neighbor texels seems the most plausible cause. Can you provide the values of du and dv?
Also, I don't think that using only one texture unit should cause any trouble, as Brad pointed out earlier, though I'm not sure about that.
On a side note: for ping-ponging you should consider creating your FBOs as an array to make your code a lot more readable.
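For example, a minimal sketch of the array-based version (illustrative names, assuming the same texture/FBO setup as in the question):
// Sketch only: keep FBOs/textures in arrays and swap an index each frame.
GLuint fbo[2];          // fbo[0] renders into tex[0], fbo[1] into tex[1]
GLuint tex[2];
int dst = counter % 2;  // framebuffer written this frame
int src = 1 - dst;      // texture read this frame
glBindFramebuffer(GL_FRAMEBUFFER, fbo[dst]);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex[src]);
// ... draw the full-screen quad with the automaton shader ...
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// present tex[dst] to the screen; counter++ swaps the roles next frame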
EDIT:
If you have problems setting your uniforms du and dv with glUniform1f(), try glUniform1i() (you then need to cast with float() in your shader) or glUniform1fv() instead. I once had the same problem with the PowerVR GLES2 drivers, where this function didn't do anything and caused the uniform to be 0.0.
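For example, a small sketch of the glUniform1fv variant (using the DU/DV locations from the question):
GLfloat du = 1.0f / 256.0f;
GLfloat dv = 1.0f / 256.0f;
glUniform1fv(DU, 1, &du);   // same effect as glUniform1f(DU, du) on drivers where 1f works
glUniform1fv(DV, 1, &dv);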

You have two textures that you'd like to deal with, yet I see only one texture unit being used here. Perhaps if you bound your FBO texture to texture unit one using code like the following:
glBindFramebuffer(GL_FRAMEBUFFER, fboA);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textureA);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureB);
or
glBindFramebuffer(GL_FRAMEBUFFER, fboB);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textureB);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureA);
before you render each frame, it would properly read from the one texture bound to unit 0 and output via the FBO to the other texture on unit 1.
As an alternative, you could permanently bind one texture to one unit and the other to the other unit, and alternate values for your AUTOMATE_TEXT uniform to indicate which unit to pull from. This would be a little more efficient, because it would avoid the overhead of binding the textures on every render.
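A rough sketch of that alternative, assuming the same names as in the question (bind once at setup, then only switch the sampler uniform per frame):
// One-time setup: each texture stays on its own unit.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureA);   // unit 0 -> textureA
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textureB);   // unit 1 -> textureB
// Per frame: write to one FBO, sample the texture attached to the other.
glUseProgram(automateProg);
glBindFramebuffer(GL_FRAMEBUFFER, (counter % 2 == 0) ? fboA : fboB);
glUniform1i(AUTOMATE_TEXT, (counter % 2 == 0) ? 1 : 0);  // read the texture that is not being written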

If a color plane of a frame has to be accessed, then a texture has to be attached to the framebuffer that is written to. If a texture that is attached to a framebuffer has to be read in a shader, then the texture has to be bound to a texture unit and the index of that texture unit has to be set to a texture sampler uniform of the shader.
Since you can't read from a framebuffer and write to the same framebuffer at once (this would cause undefined behavior), you have to read from one framebuffer and write to a second framebuffer.
After each frame the framebuffers have to change places. The buffer that was read from becomes the buffer that is written to, and the buffer that was written to becomes the buffer that is read from.
Create the textures for the frame buffer attachments:
GLuint colorTexture[2];
glGenTextures( 2, &colorTexture[0] );
glBindTexture( GL_TEXTURE_2D, colorTexture[0] );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
glBindTexture( GL_TEXTURE_2D, colorTexture[1] );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
Create the frame buffers:
GLuint frameBuffer[2];
glGenFramebuffers( 2, &frameBuffer[0] );
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer[0] );
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTexture[0], 0 );
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer[1] );
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTexture[1], 0 );
Note, if a depth buffer or even a stencil buffer is required, then a GL_DEPTH_ATTACHMENT, GL_STENCIL_ATTACHMENT or GL_DEPTH_STENCIL_ATTACHMENT has to be attached to the frame buffer.
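For example, a minimal sketch of adding a depth attachment (one renderbuffer per framebuffer, same width/height as above):
GLuint depthRbo[2];
glGenRenderbuffers( 2, &depthRbo[0] );
for ( int i = 0; i < 2; ++i )
{
    glBindRenderbuffer( GL_RENDERBUFFER, depthRbo[i] );
    glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, width, height );
    glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer[i] );
    glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRbo[i] );
}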
Since every frame is drawn to a framebuffer, you have to implement a post process that brings the color plane from the framebuffer to the drawing buffer. This can be done with glBlitFramebuffer, which transfers a rectangle of pixel values from one region of a read framebuffer to another region of a draw framebuffer.
glBindFramebuffer( GL_READ_FRAMEBUFFER, frameBuffer[ ... ] );
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, 0 );
glBlitFramebuffer( 0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST );
Your render main loop should look something like this:
int drawFB = 0;
while ( /* ... */ )
{
int readFB = 1 - drawFB;
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer[drawFB]);
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, colorTexture[readFB]);
glUseProgram( /* shader program object */ );
glUniform1i( /* texture sampler 2D location */, 1 );
// do the drawing
// ...
// post processing
glBindFramebuffer( GL_READ_FRAMEBUFFER, frameBuffer[drawFB] );
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, 0 );
glBlitFramebuffer( 0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST );
drawFB = 1 - drawFB;
}
As an alternative, you can also use 1 frame buffer with 2 color planes and 2 textures attached. Activate alternately the first or the second color plane:
GLuint frameBuffer;
glGenFramebuffers( 1, &frameBuffer );
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer );
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTexture[0], 0 );
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, colorTexture[1], 0 );
int drawFB = 0;
while ( /* ... */ )
{
int readFB = 1 - drawFB;
glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
glDrawBuffer( drawFB == 0 ? GL_COLOR_ATTACHMENT0 : GL_COLOR_ATTACHMENT1 );
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, colorTexture[readFB]);
glUseProgram( /* shader program object */ );
glUniform1i( /* texture sampler 2D location */, 1 );
// do the drawing
// ...
// post processing
glBindFramebuffer( GL_READ_FRAMEBUFFER, frameBuffer );
glReadBuffer( drawFB == 0 ? GL_COLOR_ATTACHMENT0 : GL_COLOR_ATTACHMENT1 );
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, 0 );
glBlitFramebuffer( 0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST );
drawFB = 1 - drawFB;
}
See the following simple WebGL example for a demonstration of the process:
var ShaderProgram = {};
ShaderProgram.Create = function( shaderList, uniformNames ) {
var shaderObjs = [];
for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
var shderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
if ( shderObj == 0 )
return 0;
shaderObjs.push( shderObj );
}
var progObj = this.LinkProgram( shaderObjs )
if ( progObj != 0 ) {
progObj.unifomLocation = {};
for ( var i_n = 0; i_n < uniformNames.length; ++ i_n ) {
var name = uniformNames[i_n];
progObj.unifomLocation[name] = gl.getUniformLocation( progObj, name );
}
}
return progObj;
}
ShaderProgram.Use = function( progObj ) { gl.useProgram( progObj ); }
ShaderProgram.SetUniformInt = function( progObj, name, val ) { gl.uniform1i( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniform2i = function( progObj, name, arr ) { gl.uniform2iv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformFloat = function( progObj, name, val ) { gl.uniform1f( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniform2f = function( progObj, name, arr ) { gl.uniform2fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniform3f = function( progObj, name, arr ) { gl.uniform3fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformMat44 = function( progObj, name, mat ) { gl.uniformMatrix4fv( progObj.unifomLocation[name], false, mat ); }
ShaderProgram.CompileShader = function( source, shaderStage ) {
var shaderScript = document.getElementById(source);
if (shaderScript) {
source = "";
var node = shaderScript.firstChild;
while (node) {
if (node.nodeType == 3) source += node.textContent;
node = node.nextSibling;
}
}
var shaderObj = gl.createShader( shaderStage );
gl.shaderSource( shaderObj, source );
gl.compileShader( shaderObj );
var status = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
if ( !status ) alert(gl.getShaderInfoLog(shaderObj));
return status ? shaderObj : 0;
}
ShaderProgram.LinkProgram = function( shaderObjs ) {
var prog = gl.createProgram();
for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
gl.attachShader( prog, shaderObjs[i_sh] );
gl.linkProgram( prog );
status = gl.getProgramParameter( prog, gl.LINK_STATUS );
if ( !status ) alert("Could not initialise shaders");
gl.useProgram( null );
return status ? prog : 0;
}
var FrameBuffer = {};
FrameBuffer.Create = function( vp, texturePlan ) {
var texPlan = texturePlan ? new Uint8Array( texturePlan ) : null;
var fb = gl.createFramebuffer();
fb.width = vp[0];
fb.height = vp[1];
gl.bindFramebuffer( gl.FRAMEBUFFER, fb );
fb.color0_texture = gl.createTexture();
gl.bindTexture( gl.TEXTURE_2D, fb.color0_texture );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST );
gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, fb.width, fb.height, 0, gl.RGBA, gl.UNSIGNED_BYTE, texPlan );
fb.renderbuffer = gl.createRenderbuffer();
gl.bindRenderbuffer( gl.RENDERBUFFER, fb.renderbuffer );
gl.renderbufferStorage( gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, fb.width, fb.height );
gl.framebufferTexture2D( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, fb.color0_texture, 0 );
gl.framebufferRenderbuffer( gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, fb.renderbuffer );
gl.bindTexture( gl.TEXTURE_2D, null );
gl.bindRenderbuffer( gl.RENDERBUFFER, null );
gl.bindFramebuffer( gl.FRAMEBUFFER, null );
fb.Bind = function( clear ) {
gl.bindFramebuffer( gl.FRAMEBUFFER, this );
if ( clear ) {
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
//gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.clear( gl.DEPTH_BUFFER_BIT );
}
};
fb.Release = function( clear ) {
gl.bindFramebuffer( gl.FRAMEBUFFER, null );
if ( clear ) {
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
//gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.clear( gl.DEPTH_BUFFER_BIT );
}
};
fb.BindTexture = function( textureUnit ) {
gl.activeTexture( gl.TEXTURE0 + textureUnit );
gl.bindTexture( gl.TEXTURE_2D, this.color0_texture );
};
return fb;
}
var curBufInx = 0;
var tick = 0;
var signal = 0;
function drawScene(){
var canvas = document.getElementById( "glow-canvas" );
var vp = [canvas.width, canvas.height];
var currentTime = Date.now();
var deltaMS = currentTime - startTime
testTick = Tick( currentTime, 0.05 )
signal = testTick > tick ? 1 : 0;
tick = testTick
var srcBufInx = curBufInx == 0 ? 1 : 0;
gl.viewport( 0, 0, drawFB[curBufInx].width, drawFB[curBufInx].height );
gl.enable( gl.DEPTH_TEST );
drawFB[curBufInx].Bind( true );
// set up draw shader
ShaderProgram.Use( progDraw );
var texUnitSource = 2;
drawFB[srcBufInx].BindTexture( texUnitSource );
ShaderProgram.SetUniformInt( progDraw, "u_colorAttachment0", texUnitSource );
ShaderProgram.SetUniform2i( progDraw, "u_textureSize", [drawFB[curBufInx].width, drawFB[curBufInx].height] );
ShaderProgram.SetUniformInt( progDraw, "u_signal", signal );
gl.enableVertexAttribArray( progDraw.inPos );
gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
gl.drawElements( gl.TRIANGLES, bufQuad.inxLen, gl.UNSIGNED_SHORT, 0 );
gl.disableVertexAttribArray( progDraw.inPos );
drawFB[curBufInx].Release( true );
gl.viewport( 0, 0, canvas.width, canvas.height );
var texUnitDraw = 2;
drawFB[curBufInx].BindTexture( texUnitDraw );
ShaderProgram.Use( progScreenSpace );
ShaderProgram.SetUniformInt( progScreenSpace, "u_colorAttachment0", texUnitDraw );
gl.enableVertexAttribArray( progScreenSpace.inPos );
gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
gl.vertexAttribPointer( progScreenSpace.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
gl.drawElements( gl.TRIANGLES, bufQuad.inxLen, gl.UNSIGNED_SHORT, 0 );
gl.disableVertexAttribArray( progScreenSpace.inPos );
curBufInx = curBufInx == 0 ? 1 : 0;
}
function Tick( currentTime, intervall ) {
return Math.trunc( (currentTime - startTime) / intervall );
}
var plot_download_request = false;
var drawFB;
var sliderScale = 100.0
var gl;
var progDraw;
var progScreenSpace;
var bufCube = {};
var bufQuad = {};
function sceneStart() {
var canvas = document.getElementById( "glow-canvas");
var vp = [canvas.width, canvas.height];
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
progDraw = ShaderProgram.Create(
[ { source : "draw-shader-vs", stage : gl.VERTEX_SHADER },
{ source : "draw-shader-fs", stage : gl.FRAGMENT_SHADER }
],
[ "u_colorAttachment0", "u_textureSize", "u_signal" ] );
progDraw.inPos = gl.getAttribLocation( progDraw, "inPos" );
if ( progDraw == 0 )
return;
progScreenSpace = ShaderProgram.Create(
[ { source : "screen-shader-vs", stage : gl.VERTEX_SHADER },
{ source : "screen-shader-fs", stage : gl.FRAGMENT_SHADER }
],
[ "u_colorAttachment0" ] );
progScreenSpace.inPos = gl.getAttribLocation( progDraw, "inPos" );
if ( progDraw == 0 )
return;
// create frame buffers
var texCX = Math.floor(vp[0] / 4);
var texCY = Math.floor(vp[1] / 4);
var texPlan = [];
for (ix = 0; ix < texCX; ++ix) {
for (iy = 0; iy < texCY; ++iy) {
texPlan.push( 0, 0, 0, 0 );
}
}
for (ip = 0; ip < texCX * texCY / 20; ++ip) {
var inx_tex = Math.floor( Math.random() * texCY ) * texCX + Math.floor( Math.random() * texCX );
texPlan[inx_tex * 4 + 0] = 255 * Math.random();
texPlan[inx_tex * 4 + 1] = 255 * Math.random();
texPlan[inx_tex * 4 + 2] = 127;
texPlan[inx_tex * 4 + 3] = 255;
}
drawFB = [ FrameBuffer.Create( [texCX, texCY], texPlan ), FrameBuffer.Create( [texCX, texCY], texPlan ) ];
bufQuad.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( [ -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ] ), gl.STATIC_DRAW );
bufQuad.inx = gl.createBuffer();
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( [ 0, 1, 2, 0, 2, 3 ] ), gl.STATIC_DRAW );
bufQuad.inxLen = 6;
startTime = Date.now();
setInterval(drawScene, 50);
}
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
varying vec2 vertPos;
void main()
{
vertPos.xy = inPos.xy;
gl_Position = vec4( inPos, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 vertPos;
uniform sampler2D u_colorAttachment0;
uniform ivec2 u_textureSize;
uniform int u_signal;
vec3 Merge( in vec2 texC, in vec2 dir )
{
vec2 testC = texC + dir;
vec2 rangeTest = step( vec2(0.0), testC ) * step( testC, vec2(1.0) );
vec3 texCol = texture2D( u_colorAttachment0, testC ).rgb;
vec2 tempDir = texCol.xy * 2.0 - 1.0;
vec2 pDir = tempDir;
pDir.x *= step( abs(tempDir.y * 0.7), abs( tempDir.x ) );
pDir.y *= step( abs(tempDir.x * 0.7), abs( tempDir.y ) );
pDir = sign( pDir );
vec2 tDir = sign( dir );
//vec2 dirTestTemp = step( vec2(0.5), -tDir * pDir );
//float dirTest = dirTestTemp.x * dirTestTemp.y;
vec2 dirTestTemp = tDir + pDir;
float dirTest = 1.0 - step( 0.5, abs( dirTestTemp.x ) + abs( dirTestTemp.y ) );
return rangeTest.x * rangeTest.y * dirTest * texCol;
}
void main()
{
ivec2 texSize = u_textureSize;
vec2 texStep = vec2( 1.0 / float( texSize.x ), 1.0 / float( texSize.y ) );
vec2 texC = vertPos.st * 0.5 + 0.5;
vec3 texCol = vec3(0.0);
if ( u_signal == 0 )
{
texCol = texture2D( u_colorAttachment0, texC ).rgb;
}
else
{
texCol += Merge( texC, -texStep );
texCol += Merge( texC, vec2( -texStep.x, 0.0 ) );
texCol += Merge( texC, vec2( -texStep.x, texStep.y ) );
texCol += Merge( texC, vec2( 0.0, -texStep.y ) );
texCol += Merge( texC, vec2( 0.0, texStep.y ) );
texCol += Merge( texC, vec2( texStep.x, -texStep.y ) );
texCol += Merge( texC, vec2( texStep.x, 0.0 ) );
texCol += Merge( texC, texStep );
}
if ( texCol.b > 0.0 )
{
vec2 colDir = texCol.rg * 2.0 - 1.0;
vec2 pDir = sign( colDir );
vec2 nextTexC = texC + pDir * texStep;
if ( nextTexC.x <= texStep.x/2.0 || nextTexC.x >= 1.0-texStep.x/2.0 )
colDir.x = -colDir.x;
if ( nextTexC.y <= texStep.y/2.0 || nextTexC.y >= 1.0-texStep.y/2.0 )
colDir.y *= -1.0;
texCol.rg = colDir * 0.5 + 0.5;
}
vec3 col = texCol.rgb;
gl_FragColor = vec4( col, 1.0 );
}
</script>
<script id="screen-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
varying vec2 vertPos;
void main()
{
vertPos.xy = inPos.xy;
gl_Position = vec4( inPos, 0.0, 1.0 );
}
</script>
<script id="screen-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 vertPos;
uniform sampler2D u_colorAttachment0;
void main()
{
vec4 texCol = texture2D( u_colorAttachment0, vertPos.st * 0.5 + 0.5 );
gl_FragColor = vec4( texCol.rgb, 1.0 );
}
</script>
<body onload="sceneStart();">
<canvas id="glow-canvas" style="border: none;" width="256" height="256"></canvas>
</body>

Related

Cannot bring particle system into 3D scene

I have been following this lesson for implementing a particle system.
Trying to bring the particle system into a 3D scene.
My entry point and initialization look like:
bool initOpenGL()
{
// Intialize GLFW
// GLFW is configured. Must be called before calling any GLFW functions
if (!glfwInit())
{
// An error occurred
std::cerr << "GLFW initialization failed" << std::endl;
return false;
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // forward compatible with newer versions of OpenGL as they become available but not backward compatible (it will not run on devices that do not support OpenGL 3.3
// Create an OpenGL 3.3 core, forward compatible context window
gWindow = glfwCreateWindow(gWindowWidth, gWindowHeight, APP_TITLE, NULL, NULL);
if (gWindow == NULL)
{
std::cerr << "Failed to create GLFW window" << std::endl;
glfwTerminate();
return false;
}
// Make the window's context the current one
glfwMakeContextCurrent(gWindow);
// Initialize GLEW
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK)
{
std::cerr << "Failed to initialize GLEW" << std::endl;
return false;
}
// Set the required callback functions
glfwSetKeyCallback(gWindow, glfw_onKey);
glfwSetFramebufferSizeCallback(gWindow, glfw_onFramebufferSize);
glfwSetScrollCallback(gWindow, glfw_onMouseScroll);
glClearColor(gClearColor.r, gClearColor.g, gClearColor.b, gClearColor.a);
// Define the viewport dimensions
glViewport(0, 0, gWindowWidth, gWindowHeight);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
return true;
}
Trying to render both the particle system and a 3D scene composed of meshes together, like this:
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
programID = LoadShaders("shaders/Particle.vertexshader", "shaders/Particle.fragmentshader");
CameraRight_worldspace_ID = glGetUniformLocation(programID, "CameraRight_worldspace");
CameraUp_worldspace_ID = glGetUniformLocation(programID, "CameraUp_worldspace");
ViewProjMatrixID = glGetUniformLocation(programID, "VP");
TextureID = glGetUniformLocation(programID, "myTextureSampler");
for (int i = 0; i < MaxParticles; i++)
{
ParticlesContainer[i].life = -1.0f;
ParticlesContainer[i].cameradistance = -1.0f;
}
Texture = loadDDS("textures/particle.DDS");
glGenBuffers(1, &billboard_vertex_buffer);
glBindBuffer(GL_ARRAY_BUFFER, billboard_vertex_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
glGenBuffers(1, &particles_position_buffer);
glBindBuffer(GL_ARRAY_BUFFER, particles_position_buffer);
// Initialize with empty (NULL) buffer : it will be updated later, each frame.
glBufferData(GL_ARRAY_BUFFER, MaxParticles * 4 * sizeof(GLfloat), NULL, GL_STREAM_DRAW);
// The VBO containing the colors of the particles
glGenBuffers(1, &particles_color_buffer);
glBindBuffer(GL_ARRAY_BUFFER, particles_color_buffer);
// Initialize with empty (NULL) buffer : it will be updated later, each frame.
glBufferData(GL_ARRAY_BUFFER, MaxParticles * 4 * sizeof(GLubyte), NULL, GL_STREAM_DRAW);
while (!glfwWindowShouldClose(gWindow))
{
showFPS(gWindow);
double currentTime = glfwGetTime();
double deltaTime = currentTime - lastTime;
// Poll for and process events
glfwPollEvents();
update(deltaTime);
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glm::mat4 model(1.0), view(1.0), projection(1.0);
// Create the View matrix
view = fpsCamera.getViewMatrix();
glm::mat4 ViewMatrix = view;
// Create the projection matrix
projection = glm::perspective(glm::radians(fpsCamera.getFOV()), (float)gWindowWidth / (float)gWindowHeight, 0.1f, 200.0f);
// update the view (camera) position
glm::vec3 viewPos;
viewPos.x = fpsCamera.getPosition().x;
viewPos.y = fpsCamera.getPosition().y;
viewPos.z = fpsCamera.getPosition().z;
glm::vec3 CameraPosition(glm::inverse(view)[3]);
glm::mat4 ViewProjectionMatrix = projection * view;
//BEGIN PARTICLES
int newparticles = (int)(deltaTime * 10000.0);
if (newparticles > (int)(0.016f * 10000.0))
newparticles = (int)(0.016f * 10000.0);
for (int i = 0; i < newparticles; i++)
{
int particleIndex = FindUnusedParticle();
ParticlesContainer[particleIndex].life = 1.0f; // This particle will live 5 seconds.
ParticlesContainer[particleIndex].pos = glm::vec3(0, 0, -11.0f);
float spread = 1.5f;
glm::vec3 maindir = glm::vec3(0.0f, 10.0f, 0.0f);
// Very bad way to generate a random direction;
// See for instance http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution instead,
// combined with some user-controlled parameters (main direction, spread, etc)
glm::vec3 randomdir = glm::vec3(
(rand() % 2000 - 1000.0f) / 1000.0f,
(rand() % 2000 - 1000.0f) / 1000.0f,
(rand() % 2000 - 1000.0f) / 1000.0f);
ParticlesContainer[particleIndex].speed = maindir + randomdir * spread;
// Very bad way to generate a random color
ParticlesContainer[particleIndex].r = rand() % 256;
ParticlesContainer[particleIndex].g = rand() % 256;
ParticlesContainer[particleIndex].b = rand() % 256;
ParticlesContainer[particleIndex].a = (rand() % 256) / 3;
ParticlesContainer[particleIndex].size = (rand() % 1000) / 2000.0f + 0.1f;
}
// Simulate all particles
int ParticlesCount = 0;
for (int i = 0; i < MaxParticles; i++)
{
Particle &p = ParticlesContainer[i]; // shortcut
if (p.life > 0.0f)
{
// Decrease life
p.life -= deltaTime;
if (p.life > 0.0f)
{
// Simulate simple physics : gravity only, no collisions
p.speed += glm::vec3(0.0f, -9.81f, 0.0f) * (float)deltaTime * 0.5f;
p.pos += p.speed * (float)deltaTime;
// if (i == 1)
// {
// // std::cout << glm::to_string(p.pos) << std::endl;
// }
// std::cout << glm::to_string(p.pos) << std::endl;
p.cameradistance = glm::length2(p.pos - CameraPosition);
//ParticlesContainer[i].pos += glm::vec3(0.0f,10.0f, 0.0f) * (float)delta;
// Fill the GPU buffer
g_particule_position_size_data[4 * ParticlesCount + 0] = p.pos.x;
g_particule_position_size_data[4 * ParticlesCount + 1] = p.pos.y;
g_particule_position_size_data[4 * ParticlesCount + 2] = p.pos.z;
g_particule_position_size_data[4 * ParticlesCount + 3] = p.size;
g_particule_color_data[4 * ParticlesCount + 0] = p.r;
g_particule_color_data[4 * ParticlesCount + 1] = p.g;
g_particule_color_data[4 * ParticlesCount + 2] = p.b;
g_particule_color_data[4 * ParticlesCount + 3] = p.a;
}
else
{
// Particles that just died will be put at the end of the buffer in SortParticles();
p.cameradistance = -1.0f;
}
ParticlesCount++;
}
}
SortParticles();
glBindBuffer(GL_ARRAY_BUFFER, particles_position_buffer);
glBufferData(GL_ARRAY_BUFFER, MaxParticles * 4 * sizeof(GLfloat), NULL, GL_STREAM_DRAW); // Buffer orphaning, a common way to improve streaming perf. See above link for details.
glBufferSubData(GL_ARRAY_BUFFER, 0, ParticlesCount * sizeof(GLfloat) * 4, g_particule_position_size_data);
glBindBuffer(GL_ARRAY_BUFFER, particles_color_buffer);
glBufferData(GL_ARRAY_BUFFER, MaxParticles * 4 * sizeof(GLubyte), NULL, GL_STREAM_DRAW); // Buffer orphaning, a common way to improve streaming perf. See above link for details.
glBufferSubData(GL_ARRAY_BUFFER, 0, ParticlesCount * sizeof(GLubyte) * 4, g_particule_color_data);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Use our shader
glUseProgram(programID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, Texture);
// Set our "myTextureSampler" sampler to use Texture Unit 0
glUniform1i(TextureID, 0);
glUniform3f(CameraRight_worldspace_ID, ViewMatrix[0][0], ViewMatrix[1][0], ViewMatrix[2][0]);
glUniform3f(CameraUp_worldspace_ID, ViewMatrix[0][1], ViewMatrix[1][1], ViewMatrix[2][1]);
glUniformMatrix4fv(ViewProjMatrixID, 1, GL_FALSE, &ViewProjectionMatrix[0][0]);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, billboard_vertex_buffer);
glVertexAttribPointer(
0, // attribute. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void *)0 // array buffer offset
);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, particles_position_buffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
4, // size : x + y + z + size => 4
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void *)0 // array buffer offset
);
// 3rd attribute buffer : particles' colors
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, particles_color_buffer);
glVertexAttribPointer(
2, // attribute. No particular reason for 1, but must match the layout in the shader.
4, // size : r + g + b + a => 4
GL_UNSIGNED_BYTE, // type
GL_TRUE, // normalized? *** YES, this means that the unsigned char[4] will be accessible with a vec4 (floats) in the shader ***
0, // stride
(void *)0 // array buffer offset
);
// These functions are specific to glDrawArrays*Instanced*.
// The first parameter is the attribute buffer we're talking about.
// The second parameter is the "rate at which generic vertex attributes advance when rendering multiple instances"
// http://www.opengl.org/sdk/docs/man/xhtml/glVertexAttribDivisor.xml
glVertexAttribDivisor(0, 0); // particles vertices : always reuse the same 4 vertices -> 0
glVertexAttribDivisor(1, 1); // positions : one per quad (its center) -> 1
glVertexAttribDivisor(2, 1); // color : one per quad -> 1
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ParticlesCount);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
//END OF PARTICLES
// Must be called BEFORE setting uniforms because setting uniforms is done
// on the currently active shader program.
lightingShader.use();
lightingShader.setUniform("model", glm::mat4(1.0)); // do not need to translate the models so just send the identity matrix
lightingShader.setUniform("view", view);
lightingShader.setUniform("projection", projection);
lightingShader.setUniform("viewPos", viewPos);
// // Directional light
lightingShader.setUniform("sunLight.direction", glm::vec3(0.0f, -0.9f, -0.17f));
lightingShader.setUniform("sunLight.ambient", glm::vec3(0.1f, 0.1f, 0.1f));
lightingShader.setUniform("sunLight.diffuse", glm::vec3(0.1f, 0.1f, 0.1f)); // dark
lightingShader.setUniform("sunLight.specular", glm::vec3(0.1f, 0.1f, 0.1f));
lightingShader.setUniform("spotLight.ambient", glm::vec3(0.1f, 0.1f, 0.1f));
lightingShader.setUniform("spotLight.diffuse", glm::vec3(0.8f, 0.8f, 0.8f));
lightingShader.setUniform("spotLight.specular", glm::vec3(1.0f, 1.0f, 1.0f));
lightingShader.setUniform("spotLight.position", glm::vec3(0.982347, 3.500000, 10.248156));
lightingShader.setUniform("spotLight.direction", glm::vec3(-0.202902, -0.470038, -0.859008));
lightingShader.setUniform("spotLight.cosInnerCone", glm::cos(glm::radians(15.0f)));
lightingShader.setUniform("spotLight.cosOuterCone", glm::cos(glm::radians(20.0f)));
lightingShader.setUniform("spotLight.constant", 1.0f);
lightingShader.setUniform("spotLight.linear", 0.007f);
lightingShader.setUniform("spotLight.exponent", 0.0017f);
lightingShader.setUniform("spotLight.on", gFlashlightOn);
// Render the scene
for (int i = 0; i < 1; i++)
{
model = glm::translate(glm::mat4(1.0), modelPos[i]) * glm::scale(glm::mat4(1.0), modelScale[i]); // * glm::rotate(glm::mat4(1.0), glm::radians((float)(glfwGetTime() * 100.0f)), glm::vec3(1.0f, 0.0f, 0.0f));
;
lightingShader.setUniform("model", model);
// // Set material properties
lightingShader.setUniform("material.ambient", glm::vec3(0.1f, 0.1f, 0.1f));
lightingShader.setUniformSampler("material.diffuseMap", 0);
lightingShader.setUniform("material.specular", glm::vec3(0.8f, 0.8f, 0.8f));
lightingShader.setUniform("material.shininess", 32.0f);
texture[i].bind(0); // set the texture before drawing. Our simple OBJ mesh loader does not do materials yet.
mesh[i].draw(); // Render the OBJ mesh
texture[i].unbind(0);
}
// Swap front and back buffers
glfwSwapBuffers(gWindow);
mac_patch(gWindow);
lastTime = currentTime;
}
And only the 3D scene is getting rendered, like this:
And when I comment out the mesh rendering logic, i.e. this section:
for (int i = 0; i < 1; i++)
{
model = glm::translate(glm::mat4(1.0), modelPos[i]) * glm::scale(glm::mat4(1.0), modelScale[i]); // * glm::rotate(glm::mat4(1.0), glm::radians((float)(glfwGetTime() * 100.0f)), glm::vec3(1.0f, 0.0f, 0.0f));
;
lightingShader.setUniform("model", model);
// // Set material properties
lightingShader.setUniform("material.ambient", glm::vec3(0.1f, 0.1f, 0.1f));
lightingShader.setUniformSampler("material.diffuseMap", 0);
lightingShader.setUniform("material.specular", glm::vec3(0.8f, 0.8f, 0.8f));
lightingShader.setUniform("material.shininess", 32.0f);
texture[i].bind(0); // set the texture before drawing. Our simple OBJ mesh loader does not do materials yet.
mesh[i].draw(); // Render the OBJ mesh
texture[i].unbind(0);
}
I get :
How would I render both of them at the same time?
My codebase: github
You simply forgot to bind the vertex array object for the particles before specifying the vertex attribute arrays for them:
while (!glfwWindowShouldClose(gWindow))
{
// [...]
glBindVertexArray(VertexArrayID); // <--- this is missing
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, billboard_vertex_buffer);
glVertexAttribPointer(
// [...]
);
// [...]
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ParticlesCount);
// [...]
}
But note that it is sufficient to specify the arrays of generic vertex attribute data once and then just bind the vertex array object for drawing:
glBindVertexArray(VertexArrayID);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, billboard_vertex_buffer);
glVertexAttribPointer(
// [...]
);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, particles_position_buffer);
glVertexAttribPointer(
// [...]
);
// 3rd attribute buffer : particles' colors
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, particles_color_buffer);
glVertexAttribPointer(
// [...]
);
glVertexAttribDivisor(0, 0); // particles vertices : always reuse the same 4 vertices -> 0
glVertexAttribDivisor(1, 1); // positions : one per quad (its center) -> 1
glVertexAttribDivisor(2, 1); // color : one per quad -> 1
while (!glfwWindowShouldClose(gWindow))
{
// [...]
glBindVertexArray(VertexArrayID);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ParticlesCount);
// [...]
for (int i = 0; i < 1; i++)
{
// [...]
texture[i].bind(0);
mesh[i].draw();
texture[i].unbind(0);
}
// [...]
}

In Vulkan: I want to save a depth image to a file, but always get a wrong depth image

I want to save a depth image from the framebuffer render result.
1. I create a staging buffer used to hold the image data.
2. Use vkCmdCopyImageToBuffer to copy the depth image to the staging buffer.
3. Use vkMapMemory to map the staging buffer memory into host memory.
4. Read the host memory and write the depth data to a file.
But I always get a wrong depth image. I don't know where I went wrong.
application window output.
bug depth image file.
(source file)
save depth image function:
VkDeviceSize size = WIDTH * HEIGHT * 4;
VkBuffer dstBuffer;
VkDeviceMemory dstMemory;
createBuffer(
size,
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
dstBuffer,
dstMemory);
VkCommandBuffer copyCmd = beginSingleTimeCommands();
// depth format -> VK_FORMAT_D32_SFLOAT_S8_UINT
VkBufferImageCopy region = {};
region.bufferOffset = 0;
region.bufferImageHeight = 0;
region.bufferRowLength = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset = VkOffset3D{ 0, 0, 0 };
region.imageExtent = VkExtent3D{ swapChainExtent.width, swapChainExtent.height, 1};
vkCmdCopyImageToBuffer(
copyCmd,
depthImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
dstBuffer,
1,
&region
);
endSingleTimeCommands(copyCmd);
// Map image memory so we can start copying from it
void *data;
vkMapMemory(device, dstMemory, 0, size, 0, &data);
std::ofstream file(path, std::ios::out | std::ios::binary);
// ppm header
file << "P6\n" << WIDTH << "\n" << HEIGHT << "\n" << 255 << "\n";
float *row = (float*)data;
auto size_v = WIDTH * HEIGHT;
for (uint32_t y = 0; y < size_v; y++) {
file.write((char*)row + 1, 1);
file.write((char*)row + 1, 1);
file.write((char*)row + 1, 1);
row++;
}
file.close();
// Clean up resources
vkUnmapMemory(device, dstMemory);
vkFreeMemory(device, dstMemory, nullptr);
vkDestroyBuffer(device, dstBuffer, nullptr);
Hope someone can drag me out. Thanks!
Assuming you've done all the transfer work correctly, your mapped data is basically an array of floats. This is reflected in your code by this line:
float *row = (float*)data;
However, when you actually write out the file you're treating the data like bytes...
file.write((char*)row + 1, 1);
So you're writing out 8 bits of a 32-bit float. What you need is some function to convert from the float to a color value.
Assuming the depth value is normalized (I can't remember off the top of my head whether this is the case, or if it's dependent on the pipeline or framebuffer setup) and if you just want greyscale, you could use
uint8_t map(float f) {
return (uint8_t)(f * 255.0f);
}
and inside your file-writing loop you'd do something like
uint8_t grey = map(*row);
file.write((char*)&grey, 1);
file.write((char*)&grey, 1);
file.write((char*)&grey, 1);
++row;
Alternatively, if you want some sort of color gradient for easier visualization, you'd want a more complex mapping function...
vec3 colorWheel(float normalizedHue) {
float v = normalizedHue * 6.f;
if (v < 0.f) {
return vec3(1.f, 0.f, 0.f);
} else if (v < 1.f) {
return vec3(1.f, v, 0.f);
} else if (v < 2.f) {
return vec3(1.f - (v-1.f), 1.f, 0.f);
} else if (v < 3.f) {
return vec3(0.f, 1.f, (v-2.f));
} else if (v < 4.f) {
return vec3(0.f, 1.f - (v-3.f), 1.f );
} else if (v < 5.f) {
return vec3((v-4.f), 0.f, 1.f );
} else if (v < 6.f) {
return vec3(1.f, 0.f, 1.f - (v-5.f));
} else {
return vec3(1.f, 0.f, 0.f);
}
}
and in your file output loop...
vec3 color = colorWheel(*row);
uint8_t r = map(color.r);
uint8_t g = map(color.g);
uint8_t b = map(color.b);
file.write((char*)&r, 1);
file.write((char*)&g, 1);
file.write((char*)&b, 1);
++row;

Using GL_RG_EXT and GL_UNSIGNED_BYTE

I met some problems while using GL_RG_EXT and GL_UNSIGNED_BYTE. Related code:
class TextureBuffer {
public:
GLuint texture;
GLuint frameBuffer;
GLenum internalformat;
GLenum format;
GLenum type;
int w,h;
TextureBuffer() : texture(0), frameBuffer(0) {}
void release() {
if(texture)
{
glDeleteTextures(1, &texture);
texture = 0;
}
if(frameBuffer)
{
glDeleteFramebuffers(1, &frameBuffer);
frameBuffer = 0;
}
}
};
TextureBuffer _maskTexture;
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
void SharpenGPU::generateRenderToTexture(GLint internalformat, GLenum format, GLenum type,
TextureBuffer &tb, int w, int h, bool linearInterp)
{
glGenTextures(1, &tb.texture);
glBindTexture(GL_TEXTURE_2D, tb.texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, internalformat, w, h, 0, format, type, NULL);
glGenFramebuffers(1, &tb.frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, tb.frameBuffer);
glClear(_glClearBits);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tb.texture, 0);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
printf("Framebuffer status: %x", (int)status);
tb.internalformat = internalformat;
tb.format = format;
tb.type = type;
tb.w = w;
tb.h = h;
}
When I use the following code to define _maskTexture,
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_HALF_FLOAT_OES, _maskTexture, _imageWidth, _imageHeight, false);
the code goes well. But if I use the lines below to define the _maskTexture, an error appears,
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
Error:
ProgramInfoLog: Validation Failed: Fragment program failed to compile with current context state.
Validation Failed: Vertex program failed to compile with current context state.
I'm really puzzled about it.
I know that the error is raised when compiling the vertex and fragment shaders. Here is the fragment shader I found to cause the error:
{
const GLchar fShaderText[] = FRAGMENT_SHADER_SOURCE
(
uniform sampler2D Distance4d; // unlike version 6, the patch distance has already been prepared.
uniform sampler2D DistanceCenter;
varying highp vec2 uv0;
void main()
{
highp float u_dx = 1./imageWH.x;
highp float v_dy = 1./imageWH.y;
highp vec2 ItBt = texture2D(DistanceCenter, uv0).yz;
highp vec2 direcXY;
highp float remainX = floor(mod(floor(imageWH.x * uv0.x + 0.6),2.) + 0.5); // 0 or 1
highp float remainY = floor(mod(floor(imageWH.y * uv0.y + 0.6),2.) + 0.5); // 0 or 1;
{
//center
highp float sum0 = texture2D(DistanceCenter, uv0).x;
highp float sumMin = sum0;
direcXY = vec2(0.,0.);
highp vec4 sum4d = texture2D(Distance4d, uv0);
//left
if(sum4d.x < sumMin)
{
sumMin = sum4d.x;
direcXY = vec2(-u_dx,0.);
}
//up
if(sum4d.y < sumMin)
{
sumMin = sum4d.y;
direcXY = vec2(0.,v_dy);
}
//right
if(sum4d.z < sumMin)
{
sumMin = sum4d.z;
direcXY = vec2(u_dx,0.);
}
//down
if(sum4d.w < sumMin) // when i disable this line, the error info will disappear
{
sumMin = sum4d.w;
direcXY = vec2(0.,-v_dy);
}
direcXY = (sumMin/sum0 > 0.7)? vec2(0.,0.):direcXY;// Section 4.1.1. thresholding. for that center position is preferred
}
gl_FragColor = vec4(ItBt.x, ItBt.x - ItBt.y, direcXY.x, direcXY.y);
//vec4(It, It - Bt, dx, dy);
}
);
// Store the progrm, compute uniform locations
ProgramUniforms &pu = (_programs["findP2SpeedUpforS7"] = ProgramUniforms());
pu.program = compileShaders(gVertexShaderText, fShaderText);
pu.uniformMap["mvpMatrix"] = glGetUniformLocation(pu.program, "mvpMatrix");
pu.uniformMap["Distance4d"] = glGetUniformLocation(pu.program, "Distance4d");
pu.uniformMap["DistanceCenter"] = glGetUniformLocation(pu.program, "DistanceCenter");
pu.uniformMap["imageWH"] = glGetUniformLocation(pu.program, "imageWH");
}
I have marked the line that causes the error.
Has anyone met a similar case?
Thanks.

Use of GL_STENCIL_TEST to render concave polygons

I'm working on a custom geometry library adapted to Quartz Composer and trying to draw some concave polygons in a Plug In.
I implemented the poly2tri library, so the user can choose to triangulate or not, but it's not suitable for rendering per-frame polygon transformations.
I'm a noob in OpenGL and I've been reading and testing a lot about the stencil buffer and odd/even operations, but even code that seems to work for other people doesn't work for me.
The render context is a CGLContextObj, and I'm working on a MacBook Pro with Retina display and an NVIDIA GeForce GT650. I read that not all configurations have stencil buffers, but it looks like it works sometimes, though not as I would like it to.
I was wondering if someone with the same kind of config has code that works and could take a look at mine. In particular, I'm also curious about the number of passes required, depending on the number of vertices or "convexity defects", I guess...
I took my infos from :
http://fly.cc.fer.hr/~unreal/theredbook/chapter13.html
http://commaexcess.com/articles/7/concave-polygon-triangulation-shortcut
http://graphicsbb.itgo.com/solutions/extrude.html
http://analysesmusings.wordpress.com/2012/07/13/drawing-filled-concave-polygons-using-the-stencil-buffer/
... but still not clear...
Here is my code (one version of it, in fact, as I tested so many configurations) and a picture of the result. I usually put the actual rendering in a method called for each polygon, but I rewrote it here to be clearer:
EDIT
In fact I understood that I have to draw each triangle in order to invert the bit value in the stencil buffer. So I rewrote my code into this:
CGLContextObj cgl_ctx = [context CGLContextObj];
CGLLockContext(cgl_ctx);
GLenum error;
if(cgl_ctx == NULL)
return NO;
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glDisable(GL_CULL_FACE);
glClear(GL_STENCIL_BUFFER_BIT);
glClearStencil(0);
glEnable(GL_STENCIL_TEST);
glStencilOp(GL_INVERT, GL_INVERT, GL_INVERT);
glStencilFunc(GL_ALWAYS, 1, 1);
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
// glColor4d(1., 1., 1., 1.); ----> does it make sense ?
glBegin(GL_TRIANGLE_FAN); {
for (int i = 1; i < [vertices count] - 1; i++) {
// Allways drawing the first vertex
glVertex2d([[[vertices objectAtIndex:0] objectAtIndex:0] doubleValue], [[[vertices objectAtIndex:0] objectAtIndex:1] doubleValue]);
// Then two others to make a triangle
glVertex2d([[[vertices objectAtIndex:i] objectAtIndex:0] doubleValue], [[[vertices objectAtIndex:i] objectAtIndex:1] doubleValue]);
glVertex2d([[[vertices objectAtIndex:i+1] objectAtIndex:0] doubleValue], [[[vertices objectAtIndex:i+1] objectAtIndex:1] doubleValue]);
}
}
glEnd();
glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
glStencilFunc(GL_EQUAL, 1, 1);
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
glColor4d(1., 0., 0., 0.5);
glBegin(GL_TRIANGLE_FAN); {
for (id vertex in vertices) {
glVertex2d([[vertex objectAtIndex:0] doubleValue], [[vertex objectAtIndex:1] doubleValue]);
}
glVertex2d([[[vertices objectAtIndex:0] objectAtIndex:0] doubleValue], [[[vertices objectAtIndex:0] objectAtIndex:1] doubleValue]);
}
glEnd();
glDisable (GL_STENCIL_TEST);
glDisable(GL_BLEND);
glPopClientAttrib();
glPopAttrib();
if((error = glGetError()))
NSLog(#"OpenGL error %04X", error);
CGLUnlockContext(cgl_ctx);
return (error ? NO : YES);
But it still doesn't work. Here is my result and the link to the original image and the explanation.
http://what-when-how.com/opengl-programming-guide/drawing-filled-concave-polygons-using-the-stencil-buffer-opengl-programming/
EDIT 2 :
In fact, the context enabled by Quartz Composer doesn't implement a stencil buffer. It seems impossible to render directly in OpenGL with the stencil buffer.
...
glClearStencil(0);
...
Be aware that glClearStencil() just sets a bit of state and doesn't actually clear the stencil buffer.
Try adding a glClear( GL_STENCIL_BUFFER_BIT ) somewhere before each polygon.
EDIT: Like this:
#include <GL/glut.h>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <vector>
std::vector< glm::vec2 > pts;
bool leftHeld = true;
glm::vec2* dragPt = NULL;
void mouse( int button, int state, int x, int y )
{
glm::vec2 pt( x, glutGet( GLUT_WINDOW_HEIGHT ) - y );
// left mouse button starts dragging a point
dragPt = NULL;
leftHeld = false;
if( button == GLUT_LEFT_BUTTON && state == GLUT_DOWN )
{
leftHeld = true;
size_t minIdx = 0;
for( size_t i = 0; i < pts.size(); ++i )
{
float newDist = glm::distance( pt, pts[ i ] );
float oldDist = glm::distance( pt, pts[ minIdx ] );
if( newDist <= oldDist && newDist < 15.0f )
{
minIdx = i;
dragPt = &pts[ minIdx ];
}
}
}
// middle mouse button clears all points
if( button == GLUT_MIDDLE_BUTTON && state == GLUT_UP )
{
pts.clear();
}
// right mouse button adds a point
if( button == GLUT_RIGHT_BUTTON && state == GLUT_UP )
{
pts.push_back( pt );
}
glutPostRedisplay();
}
void motion( int x, int y )
{
glm::vec2 pt( x, glutGet( GLUT_WINDOW_HEIGHT ) - y );
if( dragPt && leftHeld )
{
*dragPt = pt;
glutPostRedisplay();
}
}
void glLine( const std::vector< glm::vec2 >& line, GLenum mode )
{
glBegin( mode );
for( size_t i = 0; i < line.size(); ++i )
{
glVertex2f( line[i].x, line[i].y );
}
glEnd();
}
void display()
{
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
double w = glutGet( GLUT_WINDOW_WIDTH );
double h = glutGet( GLUT_WINDOW_HEIGHT );
glOrtho( 0, w, 0, h, -1, 1 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
// draw polygon
glClear( GL_STENCIL_BUFFER_BIT );
{
// fill stencil buffer
glEnable( GL_STENCIL_TEST );
glColorMask( GL_FALSE,GL_FALSE,GL_FALSE,GL_FALSE );
glStencilOp( GL_KEEP, GL_KEEP, GL_INVERT );
glStencilFunc( GL_ALWAYS, 0x1, 0x1 );
glBegin( GL_TRIANGLES );
for( size_t i = 1; i+1 < pts.size(); ++i )
{
glVertex2fv( glm::value_ptr( pts[ 0 ] ) );
glVertex2fv( glm::value_ptr( pts[ i ] ) );
glVertex2fv( glm::value_ptr( pts[ i+1 ] ) );
}
glEnd();
// fill color buffer
glColor3ub( 0, 128, 0 );
glColorMask( GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE );
glStencilFunc( GL_EQUAL, 0x1, 0x1 );
glBegin( GL_TRIANGLES );
for( size_t i = 1; i+1 < pts.size(); ++i )
{
glVertex2fv( glm::value_ptr( pts[ 0 ] ) );
glVertex2fv( glm::value_ptr( pts[ i ] ) );
glVertex2fv( glm::value_ptr( pts[ i+1 ] ) );
}
glEnd();
glDisable( GL_STENCIL_TEST );
}
// draw polygon boundary
glLineWidth( 1 );
glColor3ub( 255, 255, 255 );
glLine( pts, GL_LINE_LOOP );
// draw vertexes
glPointSize( 9 );
glColor3ub( 255, 0, 0 );
glLine( pts, GL_POINTS );
glutSwapBuffers();
}
int main( int argc, char **argv )
{
glutInit( &argc, argv );
glutInitDisplayMode( GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE | GLUT_STENCIL );
glutInitWindowSize( 640, 480 );
glutCreateWindow( "GLUT" );
glutMouseFunc( mouse );
glutMotionFunc( motion );
glutDisplayFunc( display );
glutMainLoop();
return 0;
}

Windows 8 Store App DirectX 11.1 Enabling Blendstate with loaded PNG texture with alpha

I am loading a PNG with transparency to a texture with the following code:
ComPtr<IWICStream> stream;
ComPtr<IWICBitmapDecoder> bitmapDecoder;
ComPtr<IWICBitmapFrameDecode> bitmapFrame;
ComPtr<IWICFormatConverter> formatConverter;
unsigned int width, height;
D3D11_SUBRESOURCE_DATA resourceData;
ZeroMemory(&resourceData, sizeof(resourceData));
DX::ThrowIfFailed( m_wicFactory->CreateStream(&stream) );
DX::ThrowIfFailed( stream->InitializeFromMemory( rawFileBytes->Data, rawFileBytes->Length) );
DX::ThrowIfFailed( m_wicFactory->CreateDecoderFromStream( stream.Get(), nullptr, WICDecodeMetadataCacheOnDemand, &bitmapDecoder ) );
DX::ThrowIfFailed( bitmapDecoder->GetFrame(0, &bitmapFrame) );
DX::ThrowIfFailed( m_wicFactory->CreateFormatConverter(&formatConverter) );
DX::ThrowIfFailed( formatConverter->Initialize( bitmapFrame.Get(), GUID_WICPixelFormat32bppPBGRA, WICBitmapDitherTypeNone, nullptr, 1.0f /* some docs set this to 0.0f */, WICBitmapPaletteTypeCustom ) );
DX::ThrowIfFailed( bitmapFrame->GetSize(&width, &height) );
std::unique_ptr<byte[]> bitmapPixels(new byte[width * height * 4]);
DX::ThrowIfFailed( formatConverter->CopyPixels( nullptr, width * 4, width * height * 4, bitmapPixels.get() ) );
resourceData.pSysMem = bitmapPixels.get();
resourceData.SysMemPitch = width * 4;
resourceData.SysMemSlicePitch = 0;
CD3D11_TEXTURE2D_DESC textureDesc( DXGI_FORMAT_B8G8R8A8_UNORM, width, height, 1, 1 );
DX::ThrowIfFailed( m_d3dDevice->CreateTexture2D( &textureDesc, &resourceData, &texture2D ) );
if ( textureView != nullptr ) {
CD3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc( texture2D.Get(), D3D11_SRV_DIMENSION_TEXTURE2D );
DX::ThrowIfFailed( m_d3dDevice->CreateShaderResourceView( texture2D.Get(), &shaderResourceViewDesc, &shaderResourceView ) );
}
And to set up my blend state in a different section of code:
Microsoft::WRL::ComPtr<ID3D11BlendState1> blendState;
D3D11_BLEND_DESC1 desc;
ZeroMemory( &desc, sizeof( desc ) );
desc.IndependentBlendEnable = FALSE;
desc.AlphaToCoverageEnable = FALSE;
desc.RenderTarget[0].BlendEnable = TRUE;
desc.RenderTarget[0].LogicOpEnable = FALSE;
desc.RenderTarget[0].SrcBlend = D3D11_BLEND::D3D11_BLEND_SRC_ALPHA;
desc.RenderTarget[0].DestBlend = D3D11_BLEND::D3D11_BLEND_INV_SRC_ALPHA;
desc.RenderTarget[0].BlendOp = D3D11_BLEND_OP::D3D11_BLEND_OP_ADD;
desc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND::D3D11_BLEND_ONE;
desc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND::D3D11_BLEND_ONE;
desc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP::D3D11_BLEND_OP_ADD;
desc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
direct3d.device->CreateBlendState1( &desc, blendState.GetAddressOf() );
direct3d.context->OMSetBlendState( blendState.Get(), NULL, 0xffffffff );
With all this setup, I still get black backgrounds where there should be alpha.
Clarification: I have a set of 48 square panels that are being overlayed on each other from z 0.0f to 48.0f, but all I can see is the very front 48.0f texture. Rather than the transparent zones being transparent, they are being rendered as black.
Edit: Here is my pixel shader:
Texture2D Texture : register(t0);
SamplerState Sampler : register(s0);
struct sPSInput
{
float4 pos : SV_POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
float4 SimplePixelShader(sPSInput input) : SV_TARGET
{
float4 textured = Texture.Sample(Sampler, input.tex);
return textured;
}
This
desc.RenderTarget[0].SrcBlend = D3D11_BLEND::D3D11_BLEND_ONE;
should be this
desc.RenderTarget[0].SrcBlend = D3D11_BLEND::D3D11_BLEND_SRC_ALPHA;
Also, AlphaToCoverageEnable should be set to false for your needs.
Furthermore, you have to disable the Z buffer. With the Z buffer enabled, all objects behind the frontmost object are prevented from being drawn.
You need to normalize your Z-values to the range 0.0f (near) - 1.0f (far).
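A hedged sketch of disabling the depth test for the transparent panels, assuming the same direct3d.device / direct3d.context objects used above:
D3D11_DEPTH_STENCIL_DESC dsDesc;
ZeroMemory( &dsDesc, sizeof( dsDesc ) );
dsDesc.DepthEnable = FALSE;                           // no Z test, so panels behind still get drawn
dsDesc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ZERO;  // and don't write depth either
dsDesc.DepthFunc = D3D11_COMPARISON_ALWAYS;
Microsoft::WRL::ComPtr<ID3D11DepthStencilState> noDepthState;
direct3d.device->CreateDepthStencilState( &dsDesc, noDepthState.GetAddressOf() );
direct3d.context->OMSetDepthStencilState( noDepthState.Get(), 0 );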
Are you setting up your ViewPort properly?
D3D11_VIEWPORT Viewport =
{
0.0f, // TopLeftX
0.0f, // TopLeftY
<width>, // Width
<height>, // Height
0.0f, // MinDepth
1.0f }; // MaxDepth