Moving a variable in my fragment shader makes it either not work or completely crash my PC

When running on a Radeon HD 7750 and declaring thisMaterialsource at (A), the program either crashes or freezes the PC to the point where I have to power-cycle the machine. It works fine when it's declared at position (B). When running on a GeForce GTX 1070, it works fine in both cases.
void main()
{
    vec3 ambientSum = vec3(0);
    vec3 diffuseSum = vec3(0);
    vec3 specSum = vec3(0);
    vec3 ambient, diffuse, spec;

    // (A) - doesn't work when declared/set here <----------------------------------------
    // Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    if (gl_FrontFacing)
    {
        for (int i = 0; i < light.activeLights; ++i)
        {
            calculateLight(i, inWorldPos.xyz, inNormal.xyz, ambient, diffuse, spec);
            ambientSum += ambient;
            diffuseSum += diffuse;
            specSum += spec;
        }
    }
    else
    {
        for (int i = 0; i < light.activeLights; ++i)
        {
            calculateLight(i, inWorldPos.xyz, -inNormal.xyz, ambient, diffuse, spec);
            ambientSum += ambient;
            diffuseSum += diffuse;
            specSum += spec;
        }
    }
    ambientSum /= light.activeLights;

    // (B) - works when declared/set here <----------------------------------------
    Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    vec4 texColor = thisMaterialsource.baseColorFactor;
    if (thisMaterialsource.colorTextureIndex > -1) { texColor = texture(texSampler[thisMaterialsource.colorTextureIndex], inUV0) * thisMaterialsource.baseColorFactor; }

    vec4 emissive = thisMaterialsource.emissiveFactor;
    if (thisMaterialsource.unlitTextureIndex > -1) {
        emissive = texture(texSampler[thisMaterialsource.unlitTextureIndex], inUV0) * thisMaterialsource.emissiveFactor;
    }

    outColor = vec4(ambientSum + diffuseSum, 1) * texColor + vec4(specSum, 1) + emissive;
}
Full shader code:
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_nonuniform_qualifier : require
struct LightInfo
{
    vec3 Position; // Light Position in eye-coords
    vec3 La;       // Ambient light intensity
    vec3 Ld;       // Diffuse light intensity
    vec3 Ls;       // Specular light intensity
};

struct MaterialInfo
{
    vec3 Ka;         // Ambient reflectivity
    vec3 Kd;         // Diffuse reflectivity
    vec3 Ks;         // Specular reflectivity
    float Shininess; // Specular shininess factor
};

struct Material {
    vec4 baseColorFactor;
    vec4 emissiveFactor;
    float metallicFactor;
    float roughnessFactor;
    float normalScale;
    float occlusionStrength;
    int colorTextureIndex;
    int normalTextureIndex;
    int unlitTextureIndex;
    int ambientOcclusionTextureIndex;
    int metallicRoughnessTextureIndex;
    int isTwoSided;
    int alphaMode;
    float alphaCutoff;
};

struct MaterialBank {
    Material materials[80];
};

struct LightData {
    vec4 pos;
    vec4 color;
};

#define MAX_CAMERAS 16
struct CameraData {
    vec4 pos;
    mat4 mat;
    mat4 view;
    mat4 proj;
    mat4 clip;
};

layout(push_constant) uniform PushConsts {
    uint cameraIndex;
    uint time;
} pushConsts;

layout(binding = 0) uniform UniformBufferCamera {
    CameraData cameras[MAX_CAMERAS];
    uint cameraCount;
    uint cameraMax;
} cam;

layout(binding = 1) uniform UniformBufferLight {
    LightData lights[16];
    vec4 ambientColor;
    int activeLights;
} light;

layout(set=1, binding = 0) uniform sampler2D texSampler[32];

layout(set=2, binding = 0) uniform UniformBufferMat {
    MaterialBank bank;
} materialBanks[1];
layout(location = 0) in vec4 inNormal;
layout(location = 1) in vec2 inUV0;
layout(location = 2) in vec2 inUV1;
layout(location = 3) in vec4 inWorldPos;
layout(location = 4) in flat uint materialId;
layout(location = 0) out vec4 outColor;
void calculateLight(int lightIndex, vec3 position, vec3 norm, out vec3 ambient, out vec3 diffuse, out vec3 spec)
{
    LightData thisLightSource = light.lights[lightIndex];
    Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    LightInfo thisLight;
    thisLight.Position = thisLightSource.pos.xyz; // Light Position in eye-coords
    thisLight.La = light.ambientColor.rgb;        // Ambient light intensity
    thisLight.Ld = thisLightSource.color.rgb;     // Diffuse light intensity
    thisLight.Ls = thisLightSource.color.rgb;     // Specular light intensity

    MaterialInfo thisMaterial;
    vec4 texColor = thisMaterialsource.baseColorFactor;
    if (thisMaterialsource.colorTextureIndex > -1) { texColor = texture(texSampler[thisMaterialsource.colorTextureIndex], inUV0) * thisMaterialsource.baseColorFactor; }
    vec4 mrSample = vec4(1);
    if (thisMaterialsource.metallicRoughnessTextureIndex > -1) { mrSample = texture(texSampler[thisMaterialsource.metallicRoughnessTextureIndex], inUV0); }
    float perceptualRoughness = mrSample.g * thisMaterialsource.roughnessFactor;
    float metallic = mrSample.b * thisMaterialsource.metallicFactor;
    thisMaterial.Ka = texColor.rgb * (metallic + perceptualRoughness) / 2; // Ambient reflectivity
    thisMaterial.Kd = texColor.rgb * (perceptualRoughness);                // Diffuse reflectivity
    thisMaterial.Ks = texColor.rgb * (metallic - perceptualRoughness);     // Specular reflectivity
    thisMaterial.Shininess = (metallic);                                   // Specular shininess factor

    vec3 n = normalize(norm);
    vec3 s = normalize(thisLight.Position - position);
    vec3 v = normalize(-position);
    vec3 r = reflect(-s, n);

    ambient = thisLight.La * thisMaterial.Ka;
    if (thisMaterialsource.ambientOcclusionTextureIndex > -1) {
        float ao = texture(texSampler[thisMaterialsource.ambientOcclusionTextureIndex], inUV0).r;
        ambient = ambient * ao;
    }
    float sDotN = max(dot(s, n), 0.0);
    diffuse = thisLight.Ld * thisMaterial.Kd * sDotN;
    spec = thisLight.Ls * thisMaterial.Ks * pow(max(dot(r, v), 0.0), thisMaterial.Shininess);
}
void main()
{
    vec3 ambientSum = vec3(0);
    vec3 diffuseSum = vec3(0);
    vec3 specSum = vec3(0);
    vec3 ambient, diffuse, spec;

    // (A) - doesn't work when declared/set here <----------------------------------------
    // Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    if (gl_FrontFacing)
    {
        for (int i = 0; i < light.activeLights; ++i)
        {
            calculateLight(i, inWorldPos.xyz, inNormal.xyz, ambient, diffuse, spec);
            ambientSum += ambient;
            diffuseSum += diffuse;
            specSum += spec;
        }
    }
    else
    {
        for (int i = 0; i < light.activeLights; ++i)
        {
            calculateLight(i, inWorldPos.xyz, -inNormal.xyz, ambient, diffuse, spec);
            ambientSum += ambient;
            diffuseSum += diffuse;
            specSum += spec;
        }
    }
    ambientSum /= light.activeLights;

    // (B) - works when declared/set here <----------------------------------------
    Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    vec4 texColor = thisMaterialsource.baseColorFactor;
    if (thisMaterialsource.colorTextureIndex > -1) { texColor = texture(texSampler[thisMaterialsource.colorTextureIndex], inUV0) * thisMaterialsource.baseColorFactor; }

    vec4 emissive = thisMaterialsource.emissiveFactor;
    if (thisMaterialsource.unlitTextureIndex > -1) {
        emissive = texture(texSampler[thisMaterialsource.unlitTextureIndex], inUV0) * thisMaterialsource.emissiveFactor;
    }

    outColor = vec4(ambientSum + diffuseSum, 1) * texColor + vec4(specSum, 1) + emissive;
}
Please excuse the quality of my shader code; I'm just experimenting and cobbling stuff together, and I came upon this issue which, aside from being annoying to debug, completely blindsided me.
It's fixed now, but I'd like to know why it happened, and honestly, unlike lots of other issues I've dealt with while learning, I don't even know where to start looking.
Is this simply a bug in the GPU/drivers, or a manifestation of some profound and arcane machinations that dictate how shaders work? How can I debug this sort of issue? Is there a way to see that it's going to fail, other than running it? I'd really like to know; I care much more about learning from this than just getting it to run.

If you don't mind going through one or two more power-off-on cycles, try the following simplification for your shader:
void main()
{
    ... variables

    // (A) - doesn't work when declared/set here <----------------------------------------
    // Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    /// Instead of doing two almost identical loops, extract the normal inversion
    vec3 normal = (gl_FrontFacing ? inNormal : -inNormal).xyz;
    for (int i = 0; i < light.activeLights; ++i)
    {
        calculateLight(i, inWorldPos.xyz, normal, ambient, diffuse, spec);
        ambientSum += ambient;
        diffuseSum += diffuse;
        specSum += spec;
    }
    ambientSum /= light.activeLights;

    // (B) - works when declared/set here <----------------------------------------
    Material thisMaterialsource = materialBanks[0].bank.materials[materialId];

    ... all the rest
}
A wild guess might be the following: the HD 7750 is relatively old, and two loops over a (dynamic) number of active lights can generate too much shader bytecode if the GLSL compiler does something strange, so you end up overflowing the available per-thread memory. The GTX 1070 is obviously much more powerful and would not suffer from an "abuse" like this.
Other than this, the shader should be fine, and the above change is a workaround, not a must. We have encountered strange behavior of GLSL (i.e., something not working, contrary to the spec) even on newer Radeons, but it was not similar to your problem.
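If you want a way to see trouble coming without actually running it, one rough offline check (my suggestion, not a guaranteed predictor of what the AMD driver will do) is to compile each variant of the shader to SPIR-V and compare how much code comes out, e.g. with glslangValidator and spirv-dis from the Vulkan SDK (the file names below are placeholders):
glslangValidator -V shader_variant_A.frag -o shader_A.spv
glslangValidator -V shader_variant_B.frag -o shader_B.spv
# Disassemble and compare the size / instruction count of the two variants
spirv-dis shader_A.spv > shader_A.txt
spirv-dis shader_B.spv > shader_B.txt
A large difference between the two disassemblies would at least support the "too much bytecode" theory. Tools like AMD's Radeon GPU Analyzer can go a step further and show ISA and register usage for a specific AMD architecture, which is closer to what the HD 7750 actually has to execute.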

Related

How to color individual pixels with OpenGL ES 2.0?

Is it possible to change the color of an individual pixel with OpenGL ES 2.0? Right now, I have found that I can manage it by using a vertex. I've used this method to draw it:
GLES20.glDrawArrays(GLES20.GL_POINTS, 0, 1);
The point size was set to the minimum so that only a single pixel is painted.
All good, until I needed to draw 3 to 4 million of them! It takes 5-6 seconds to initialize just one frame. That is far too slow, given that the pixels will be updated constantly; the update/refresh rate should ideally be as close as possible to 60 fps.
How can I paint them in a more efficient way?
Note: It's a must that they are painted individually!
My attempt is here (for a screen of 1440x2560 px):
package com.example.ctelescu.opengl_pixel_draw;
import android.opengl.GLES20;
import android.opengl.GLSurfaceView;
import android.opengl.Matrix;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;
public class PixelDrawRenderer implements GLSurfaceView.Renderer {
private float[] mModelMatrix = new float[16];
private float[] mViewMatrix = new float[16];
private float[] mProjectionMatrix = new float[16];
private float[] mMVPMatrix = new float[16];
private final FloatBuffer mVerticesBuffer;
private int mMVPMatrixHandle;
private int mPositionHandle;
private int mColorHandle;
private final int mBytesPerFloat = 4;
private final int mStrideBytes = 7 * mBytesPerFloat;
private final int mPositionOffset = 0;
private final int mPositionDataSize = 3;
private final int mColorOffset = 3;
private final int mColorDataSize = 4;
public PixelDrawRenderer() {
// Define the vertices.
// final float[] vertices = {
// // X, Y, Z,
// // R, G, B, A
// -1f, 1f, 0.0f,
// 1.0f, 0.0f, 0.0f, 1.0f,
//
// -0.9f, 1.2f, 0.0f,
// 0.0f, 0.0f, 1.0f, 1.0f,
//
// -0.88f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f,
//
// -0.87f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f,
//
// -0.86f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f,
//
// -0.85f, 1.2f, 0.0f,
// 0.0f, 1.0f, 0.0f, 1.0f};
// Initialize the buffers.
mVerticesBuffer = ByteBuffer.allocateDirect(22579200 * mBytesPerFloat)
.order(ByteOrder.nativeOrder()).asFloatBuffer();
// mVerticesBuffer.put(vertices);
}
@Override
public void onSurfaceCreated(GL10 glUnused, EGLConfig config) {
// Set the background clear color to gray.
GLES20.glClearColor(0.5f, 0.5f, 0.5f, 0.5f);
// Position the eye behind the origin.
final float eyeX = 0.0f;
final float eyeY = 0.0f;
final float eyeZ = 1.5f;
// We are looking toward the distance
final float lookX = 0.0f;
final float lookY = 0.0f;
final float lookZ = -5.0f;
// Set our up vector. This is where our head would be pointing were we holding the camera.
final float upX = 0.0f;
final float upY = 1.0f;
final float upZ = 0.0f;
// Set the view matrix. This matrix can be said to represent the camera position.
// NOTE: In OpenGL 1, a ModelView matrix is used, which is a combination of a model and
// view matrix. In OpenGL 2, we can keep track of these matrices separately if we choose.
Matrix.setLookAtM(mViewMatrix, 0, eyeX, eyeY, eyeZ, lookX, lookY, lookZ, upX, upY, upZ);
final String vertexShader =
"uniform mat4 u_MVPMatrix; \n" // A constant representing the combined model/view/projection matrix.
+ "attribute vec4 a_Position; \n" // Per-vertex position information we will pass in.
+ "attribute vec4 a_Color; \n" // Per-vertex color information we will pass in.
+ "varying vec4 v_Color; \n" // This will be passed into the fragment shader.
+ "void main() \n" // The entry point for our vertex shader.
+ "{ \n"
+ " v_Color = a_Color; \n" // Pass the color through to the fragment shader.
// It will be interpolated across the vertex.
+ " gl_Position = u_MVPMatrix \n" // gl_Position is a special variable used to store the final position.
+ " * a_Position; \n" // Multiply the vertex by the matrix to get the final point in
+ " gl_PointSize = 0.1; \n"
+ "} \n"; // normalized screen coordinates.
final String fragmentShader =
"#ifdef GL_FRAGMENT_PRECISION_HIGH \n"
+ "precision highp float; \n"
+ "#else \n"
+ "precision mediump float; \n" // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
+ "#endif \n"
+ "varying vec4 v_Color; \n" // This is the color from the vertex shader interpolated across the
// vertex per fragment.
+ "void main() \n" // The entry point for our fragment shader.
+ "{ \n"
+ " gl_FragColor = v_Color; \n" // Pass the color directly through the pipeline.
+ "} \n";
// Load in the vertex shader.
int vertexShaderHandle = GLES20.glCreateShader(GLES20.GL_VERTEX_SHADER);
if (vertexShaderHandle != 0) {
// Pass in the shader source.
GLES20.glShaderSource(vertexShaderHandle, vertexShader);
// Compile the shader.
GLES20.glCompileShader(vertexShaderHandle);
// Get the compilation status.
final int[] compileStatus = new int[1];
GLES20.glGetShaderiv(vertexShaderHandle, GLES20.GL_COMPILE_STATUS, compileStatus, 0);
// If the compilation failed, delete the shader.
if (compileStatus[0] == 0) {
GLES20.glDeleteShader(vertexShaderHandle);
vertexShaderHandle = 0;
}
}
if (vertexShaderHandle == 0) {
throw new RuntimeException("Error creating vertex shader.");
}
// Load in the fragment shader shader.
int fragmentShaderHandle = GLES20.glCreateShader(GLES20.GL_FRAGMENT_SHADER);
if (fragmentShaderHandle != 0) {
// Pass in the shader source.
GLES20.glShaderSource(fragmentShaderHandle, fragmentShader);
// Compile the shader.
GLES20.glCompileShader(fragmentShaderHandle);
// Get the compilation status.
final int[] compileStatus = new int[1];
GLES20.glGetShaderiv(fragmentShaderHandle, GLES20.GL_COMPILE_STATUS, compileStatus, 0);
// If the compilation failed, delete the shader.
if (compileStatus[0] == 0) {
GLES20.glDeleteShader(fragmentShaderHandle);
fragmentShaderHandle = 0;
}
}
if (fragmentShaderHandle == 0) {
throw new RuntimeException("Error creating fragment shader.");
}
// Create a program object and store the handle to it.
int programHandle = GLES20.glCreateProgram();
if (programHandle != 0) {
// Bind the vertex shader to the program.
GLES20.glAttachShader(programHandle, vertexShaderHandle);
// Bind the fragment shader to the program.
GLES20.glAttachShader(programHandle, fragmentShaderHandle);
// Bind attributes
GLES20.glBindAttribLocation(programHandle, 0, "a_Position");
GLES20.glBindAttribLocation(programHandle, 1, "a_Color");
// Link the two shaders together into a program.
GLES20.glLinkProgram(programHandle);
// Get the link status.
final int[] linkStatus = new int[1];
GLES20.glGetProgramiv(programHandle, GLES20.GL_LINK_STATUS, linkStatus, 0);
// If the link failed, delete the program.
if (linkStatus[0] == 0) {
GLES20.glDeleteProgram(programHandle);
programHandle = 0;
}
}
if (programHandle == 0) {
throw new RuntimeException("Error creating program.");
}
// Set program handles. These will later be used to pass in values to the program.
mMVPMatrixHandle = GLES20.glGetUniformLocation(programHandle, "u_MVPMatrix");
mPositionHandle = GLES20.glGetAttribLocation(programHandle, "a_Position");
mColorHandle = GLES20.glGetAttribLocation(programHandle, "a_Color");
// Tell OpenGL to use this program when rendering.
GLES20.glUseProgram(programHandle);
}
@Override
public void onSurfaceChanged(GL10 glUnused, int width, int height) {
// Set the OpenGL viewport to the same size as the surface.
GLES20.glViewport(0, 0, width, height);
// Create a new perspective projection matrix. The height will stay the same
// while the width will vary as per aspect ratio.
final float ratio = (float) width / height;
final float left = -ratio;
final float right = ratio;
final float bottom = -1.0f;
final float top = 1.0f;
final float near = 1.0f;
final float far = 10.0f;
Matrix.frustumM(mProjectionMatrix, 0, left, right, bottom, top, near, far);
float[] vertices = new float[22579200];
int counter = 0;
for (float i = -width / 2; i < width / 2; i++) {
for (float j = height / 2; j > -height / 2; j--) {
// Initialize the buffers.
vertices[counter++] = 2f * i * (1f / width); //X
vertices[counter++] = 2f * j * (1.5f / height); //Y
vertices[counter++] = 0; //Z
vertices[counter++] = 1f; //red
vertices[counter++] = 1f; //green
vertices[counter++] = 0f; //blue
vertices[counter++] = 1f; //alpha
}
}
mVerticesBuffer.put(vertices);
mVerticesBuffer.clear();
}
@Override
public void onDrawFrame(GL10 glUnused) {
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT | GLES20.GL_COLOR_BUFFER_BIT);
// Draw the vertices facing straight on.
Matrix.setIdentityM(mModelMatrix, 0);
drawVertices(mVerticesBuffer);
}
private void drawVertices(final FloatBuffer aVertexBuffer) {
// Pass in the position information
aVertexBuffer.position(mPositionOffset);
GLES20.glVertexAttribPointer(mPositionHandle, mPositionDataSize, GLES20.GL_FLOAT, false,
mStrideBytes, aVertexBuffer);
GLES20.glEnableVertexAttribArray(mPositionHandle);
// Pass in the color information
aVertexBuffer.position(mColorOffset);
GLES20.glVertexAttribPointer(mColorHandle, mColorDataSize, GLES20.GL_FLOAT, false,
mStrideBytes, aVertexBuffer);
GLES20.glEnableVertexAttribArray(mColorHandle);
// This multiplies the view matrix by the model matrix, and stores the result in the MVP matrix
// (which currently contains model * view).
Matrix.multiplyMM(mMVPMatrix, 0, mViewMatrix, 0, mModelMatrix, 0);
// This multiplies the modelview matrix by the projection matrix, and stores the result in the MVP matrix
// (which now contains model * view * projection).
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mMVPMatrix, 0);
GLES20.glDrawArrays(GLES20.GL_POINTS, 0, 3225600);
}
}

Use the Rajawali renderer to render a colored point cloud

Since the Yamabe release in https://github.com/googlesamples/tango-examples-java/tree/master/PointCloudJava and the switch to the Rajawali renderer, the point clouds are no longer colored according to their distance from the 3D camera.
I tried to adapt the code of Points.java as follows, to get the same functionality back:
public class Points extends Object3D {
private static final String sVertexShaderCode = "uniform mat4 uMVPMatrix;"
+ "attribute vec4 vPosition;" + "varying vec4 vColor;"
+ "void main() {" + "gl_PointSize = 5.0;"
+ " gl_Position = uMVPMatrix * vPosition;"
+ " vColor = vPosition;" + "}";
private static final String sFragmentShaderCode = "precision mediump float;"
+ "varying vec4 vColor;"
+ "void main() {"
+ " gl_FragColor = vec4(vColor);" + "}";
private int mMaxNumberofVertices;
public Points(int numberOfPoints) {
super();
mMaxNumberofVertices = numberOfPoints;
init(true);
Material m = new Material(new VertexShader(sVertexShaderCode), new FragmentShader(sFragmentShaderCode));
//m.setColor(Color.GREEN);
setMaterial(m);
}
// Initialize the buffers for Points primitive.
// Since only vertex and Index buffers are used, we only initialize them using setdata call.
protected void init(boolean createVBOs) {
float[] vertices = new float[mMaxNumberofVertices*3];
int[] indices = new int[mMaxNumberofVertices];
for(int i = 0; i < indices.length; ++i){
indices[i] = i;
}
setData(vertices, GLES20.GL_STATIC_DRAW,
null, GLES20.GL_STATIC_DRAW,
null, GLES20.GL_STATIC_DRAW,
null, GLES20.GL_STATIC_DRAW,
indices, GLES20.GL_STATIC_DRAW,
true);
}
// Update the geometry of the points once new Point Cloud Data is available.
public void updatePoints(FloatBuffer pointCloudBuffer, int pointCount) {
pointCloudBuffer.position(0);
mGeometry.setNumIndices(pointCount);
mGeometry.getVertices().position(0);
mGeometry.changeBufferData(mGeometry.getVertexBufferInfo(), pointCloudBuffer, 0, pointCount * 3);
}
public void preRender() {
super.preRender();
setDrawingMode(GLES20.GL_POINTS);
GLES10.glPointSize(5.0f);
}
}
But the points are only colored red instead of having different colors.
I'm quite new to Rajawali and OpenGL, so could someone tell me what part I am missing to get the shader working on the Points class?
Thanks very much, Peter.

Using GL_RG_EXT and GL_UNSIGNED_BYTE

I ran into some problems while using GL_RG_EXT and GL_UNSIGNED_BYTE. Related code:
class TextureBuffer {
public:
GLuint texture;
GLuint frameBuffer;
GLenum internalformat;
GLenum format;
GLenum type;
int w,h;
TextureBuffer() : texture(0), frameBuffer(0) {}
void release() {
if(texture)
{
glDeleteTextures(1, &texture);
texture = 0;
}
if(frameBuffer)
{
glDeleteFramebuffers(1, &frameBuffer);
frameBuffer = 0;
}
}
};
TextureBuffer _maskTexture;
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
void SharpenGPU::generateRenderToTexture(GLint internalformat, GLenum format, GLenum type,
TextureBuffer &tb, int w, int h, bool linearInterp)
{
glGenTextures(1, &tb.texture);
glBindTexture(GL_TEXTURE_2D, tb.texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, internalformat, w, h, 0, format, type, NULL);
glGenFramebuffers(1, &tb.frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, tb.frameBuffer);
glClear(_glClearBits);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tb.texture, 0);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
printf("Framebuffer status: %x", (int)status);
tb.internalformat = internalformat;
tb.format = format;
tb.type = type;
tb.w = w;
tb.h = h;
}
When I use the following code to define _maskTexture,
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_HALF_FLOAT_OES, _maskTexture, _imageWidth, _imageHeight, false);
the code works fine. But if I use the line below to define _maskTexture, an error appears:
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
Error:
ProgramInfoLog: Validation Failed: Fragment program failed to compile with current context state.
Validation Failed: Vertex program failed to compile with current context state.
I'm really puzzled by it.
I know the error message is produced when the vertex and fragment shaders are compiled. Here is the fragment shader I found to cause the error:
{
const GLchar fShaderText[] = FRAGMENT_SHADER_SOURCE
(
uniform sampler2D Distance4d; // unsimilar with version 6, patch distance has been prepared.
uniform sampler2D DistanceCenter;
varying highp vec2 uv0;
void main()
{
highp float u_dx = 1./imageWH.x;
highp float v_dy = 1./imageWH.y;
highp vec2 ItBt = texture2D(DistanceCenter, uv0).yz;
highp vec2 direcXY;
highp float remainX = floor(mod(floor(imageWH.x * uv0.x + 0.6),2.) + 0.5); // 0 or 1
highp float remainY = floor(mod(floor(imageWH.y * uv0.y + 0.6),2.) + 0.5); // 0 or 1;
{
//center
highp float sum0 = texture2D(DistanceCenter, uv0).x;
highp float sumMin = sum0;
direcXY = vec2(0.,0.);
highp vec4 sum4d = texture2D(Distance4d, uv0);
//left
if(sum4d.x < sumMin)
{
sumMin = sum4d.x;
direcXY = vec2(-u_dx,0.);
}
//up
if(sum4d.y < sumMin)
{
sumMin = sum4d.y;
direcXY = vec2(0.,v_dy);
}
//right
if(sum4d.z < sumMin)
{
sumMin = sum4d.z;
direcXY = vec2(u_dx,0.);
}
//down
if(sum4d.w < sumMin) // when i disable this line, the error info will disappear
{
sumMin = sum4d.w;
direcXY = vec2(0.,-v_dy);
}
direcXY = (sumMin/sum0 > 0.7)? vec2(0.,0.):direcXY;// Section 4.1.1. thresholding. for that center position is preferred
}
gl_FragColor = vec4(ItBt.x, ItBt.x - ItBt.y, direcXY.x, direcXY.y);
//vec4(It, It - Bt, dx, dy);
}
);
// Store the progrm, compute uniform locations
ProgramUniforms &pu = (_programs["findP2SpeedUpforS7"] = ProgramUniforms());
pu.program = compileShaders(gVertexShaderText, fShaderText);
pu.uniformMap["mvpMatrix"] = glGetUniformLocation(pu.program, "mvpMatrix");
pu.uniformMap["Distance4d"] = glGetUniformLocation(pu.program, "Distance4d");
pu.uniformMap["DistanceCenter"] = glGetUniformLocation(pu.program, "DistanceCenter");
pu.uniformMap["imageWH"] = glGetUniformLocation(pu.program, "imageWH");
}
I have marked the line that causes the error (see the comment in the code above).
Has anyone run into a similar case?
Thanks.

Android animation with OpenGL ES 2.0 - Conceptual questions

I'm learning OpenGL ES 2.0 on Android and have some questions. Hope you guys can help me out.
According to the Android Developers website, "...OpenGL ES allows you to define drawn objects using coordinates in three-dimensional space." Is this strictly true, or are they "hiding" the "w" coordinate? Normal space has 3 coordinates, but clip space has 4 (x, y, z, w), right?
Why is the GLES20.glBindBuffer(arg0, arg1) function only defined for int values if my triangle is defined in a float array?
Even if I don't call glBindBuffer(GL_ARRAY_BUFFER, myPositionBufferObject); my program still works. How can this work if I'm not binding my buffer to the OpenGL target GL_ARRAY_BUFFER?
This is the code for my Triangle class. It's reduced to the bare minimum needed to render the simplest triangle.
public class Triangle {
ByteBuffer myByteBuffer;
FloatBuffer positionBufferObject;
int mProgram;
public Triangle() {
float vertexPositions[] = {
0.75f, 0.75f, 0.0f, 1.0f,
0.75f, -0.75f, 0.0f, 1.0f,
-0.75f, -0.75f, 0.0f, 1.0f
};
myByteBuffer = ByteBuffer.allocateDirect(vertexPositions.length * 4);
myByteBuffer.order(ByteOrder.nativeOrder());
positionBufferObject = myByteBuffer.asFloatBuffer();
positionBufferObject.put(vertexPositions);
positionBufferObject.position(0);
String vertexShaderCode =
"attribute vec4 vPosition;" +
"void main() {" +
" gl_Position = vPosition;" +
"}";
String fragmentShaderCode =
"precision mediump float;" +
"uniform vec4 vColor;" +
"void main() {" +
" gl_FragColor = vColor;" +
"}";
int myVertexShader = GLES20.glCreateShader(GLES20.GL_VERTEX_SHADER);
GLES20.glShaderSource(myVertexShader, vertexShaderCode);
GLES20.glCompileShader(myVertexShader);
int myFragmentShader = GLES20.glCreateShader(GLES20.GL_FRAGMENT_SHADER);
GLES20.glShaderSource(myFragmentShader, fragmentShaderCode);
GLES20.glCompileShader(myFragmentShader);
mProgram = GLES20.glCreateProgram();
GLES20.glAttachShader(mProgram, myVertexShader);
GLES20.glAttachShader(mProgram, myFragmentShader);
GLES20.glLinkProgram(mProgram);
}
public void draw() {
GLES20.glUseProgram(mProgram);
GLES20.glEnableVertexAttribArray(0);
GLES20.glVertexAttribPointer(0, 4, GLES20.GL_FLOAT, false, 0, positionBufferObject);
GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0, 3);
}
}

C++ shader question

Suppose I have some geometric data and I wish to render it in wireframe mode. Obviously, this can be done through the API (for example, by setting an appropriate fill mode like D3DFILL_WIREFRAME in DirectX).
But I was wondering whether it is possible to achieve this using vertex / geometry / pixel shaders (probably in combination).
Does anyone have a sample of that?
Thank you.
Perhaps something like http://wn.com/DirectX_C++__Geometry_with_Wireframe_Effect ?
Try this: http://cgg-journal.com/2008-2/06/index.html -- code at the bottom
// ------------------ Vertex Shader --------------------------------
#version 120
#extension GL_EXT_gpu_shader4 : enable

void main(void)
{
    gl_Position = ftransform();
}

// ------------------ Geometry Shader --------------------------------
#version 120
#extension GL_EXT_gpu_shader4 : enable

uniform vec2 WIN_SCALE;
noperspective varying vec3 dist;

void main(void)
{
    vec2 p0 = WIN_SCALE * gl_PositionIn[0].xy / gl_PositionIn[0].w;
    vec2 p1 = WIN_SCALE * gl_PositionIn[1].xy / gl_PositionIn[1].w;
    vec2 p2 = WIN_SCALE * gl_PositionIn[2].xy / gl_PositionIn[2].w;
    vec2 v0 = p2 - p1;
    vec2 v1 = p2 - p0;
    vec2 v2 = p1 - p0;
    float area = abs(v1.x * v2.y - v1.y * v2.x);

    dist = vec3(area / length(v0), 0, 0);
    gl_Position = gl_PositionIn[0];
    EmitVertex();

    dist = vec3(0, area / length(v1), 0);
    gl_Position = gl_PositionIn[1];
    EmitVertex();

    dist = vec3(0, 0, area / length(v2));
    gl_Position = gl_PositionIn[2];
    EmitVertex();

    EndPrimitive();
}

// ------------------ Fragment Shader --------------------------------
#version 120
#extension GL_EXT_gpu_shader4 : enable

noperspective varying vec3 dist;
const vec4 WIRE_COL = vec4(1.0, 0.0, 0.0, 1);
const vec4 FILL_COL = vec4(1, 1, 1, 1);

void main(void)
{
    float d = min(dist[0], min(dist[1], dist[2]));
    float I = exp2(-2 * d * d);
    gl_FragColor = I * WIRE_COL + (1.0 - I) * FILL_COL;
}