vertex shader
// Vertex shader: pass-through. Forwards the texture coordinate to the
// fragment stage and lifts the 2-D position into clip space.
attribute vec2 vertPosition; // vertex position, already in NDC-style 2-D coords
attribute vec2 vertTextCord; // per-vertex texture coordinate
varying vec2 fragTextCord;   // interpolated texcoord consumed by the fragment shader
void main(){
fragTextCord = vertTextCord;
gl_Position = vec4(vertPosition,0.0,1.0); // z = 0, w = 1: no depth, no perspective
}
fragment shader
// Fragment shader: samples ONLY `image`.
precision highp float;
varying vec2 fragTextCord; // texcoord from the vertex shader
uniform sampler2D image;   // bound to texture unit 0 by the JS below
uniform sampler2D lut;     // bound to unit 1 -- declared but never sampled
void main(){
// NOTE(review): `lut` is never read, so whatever is bound to unit 1 cannot
// affect the output -- this is why both textures appear to render the same
// image. An unused sampler may also be optimized out by the GLSL compiler,
// making getUniformLocation(program, 'lut') return null/-1. TODO: actually
// sample `lut` here if a color-lookup effect was intended.
gl_FragColor = texture2D(image,fragTextCord);
}
texture binding
// Texture binding: unit 0 -> sampler `image`, unit 1 -> sampler `lut`.
// The uniform1i value is the texture UNIT index, not the texture object.
gl.useProgram(program);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, imageTexture);
gl.uniform1i(gl.getUniformLocation(program, 'image'), 0); // `image` reads unit 0
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, lutTexture);
gl.uniform1i(gl.getUniformLocation(program, 'lut'), 1); // `lut` reads unit 1
both sampler2D image & lut render same image
using glview on react native
Related
When running on a Radeon HD 7750 and declaring thisMaterialsource at (A), the program either crashes or freezes the PC to the point I have to power cycle the machine. It works fine when it's declared at position (B). When running on a Geforce GTX 1070, it works fine in both cases.
// Fragment entry point (question excerpt): accumulates per-light Phong
// terms via calculateLight(), then modulates by the material's base color
// and adds the emissive term.
void main()
{
// Running sums of the per-light contributions.
vec3 ambientSum = vec3(0);
vec3 diffuseSum = vec3(0);
vec3 specSum = vec3(0);
vec3 ambient, diffuse, spec;
// (A) - doesn't work when declared/set here <----------------------------------------
// Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
// Two identical loops, differing only in the sign of the shading normal
// (back faces get the flipped normal).
if (gl_FrontFacing)
{
for (int i=0; i<light.activeLights; ++i)
{
calculateLight(i, inWorldPos.xyz, inNormal.xyz, ambient, diffuse, spec);
ambientSum += ambient;
diffuseSum += diffuse;
specSum += spec;
}
}
else
{
for (int i=0; i<light.activeLights; ++i)
{
calculateLight(i, inWorldPos.xyz, -inNormal.xyz, ambient, diffuse, spec);
ambientSum += ambient;
diffuseSum += diffuse;
specSum += spec;
}
}
// Only the ambient term is averaged over the light count.
// NOTE(review): divides by zero (NaN) when light.activeLights == 0 -- confirm
// the application guarantees at least one active light.
ambientSum /= light.activeLights;
// (B) - works when declared/set here <----------------------------------------
Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
// Base color: factor alone, or texture * factor when an index (>-1) is assigned.
vec4 texColor = thisMaterialsource.baseColorFactor;
if(thisMaterialsource.colorTextureIndex > -1){ texColor = texture(texSampler[thisMaterialsource.colorTextureIndex], inUV0) * thisMaterialsource.baseColorFactor; }
vec4 emissive = thisMaterialsource.emissiveFactor;
if (thisMaterialsource.unlitTextureIndex > -1) {
emissive = texture(texSampler[thisMaterialsource.unlitTextureIndex], inUV0) * thisMaterialsource.emissiveFactor;
}
outColor = vec4(ambientSum + diffuseSum, 1) * texColor + vec4(specSum, 1) + emissive;
}
Full shader code:
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_nonuniform_qualifier : require
// Phong-style per-light parameters consumed by calculateLight().
struct LightInfo
{
vec3 Position;//Light Position in eye-coords
vec3 La;//Ambient light intensity
vec3 Ld;//Diffuse light intensity
vec3 Ls;//Specular light intensity
};
// Phong-style reflectivities, derived per-fragment from a Material record.
struct MaterialInfo
{
vec3 Ka;//Ambient reflectivity
vec3 Kd;//Diffuse reflectivity
vec3 Ks;//Specular reflectivity
float Shininess;//Specular shininess factor
};
// glTF-like PBR material record. The *TextureIndex fields index into
// texSampler[] and are -1 when the corresponding texture is absent.
struct Material{
vec4 baseColorFactor;
vec4 emissiveFactor;
float metallicFactor;
float roughnessFactor;
float normalScale;
float occlusionStrength;
int colorTextureIndex;
int normalTextureIndex;
int unlitTextureIndex;
int ambientOcclusionTextureIndex;
int metallicRoughnessTextureIndex;
int isTwoSided;
int alphaMode;
float alphaCutoff;
};
// Fixed-capacity material table, addressed by the flat `materialId` input.
struct MaterialBank{
Material materials[80];
};
struct LightData{
vec4 pos;
vec4 color;
};
#define MAX_CAMERAS 16
struct CameraData{
vec4 pos;
mat4 mat;
mat4 view;
mat4 proj;
mat4 clip;
};
// Per-draw constants supplied via Vulkan push constants.
layout(push_constant) uniform PushConsts {
uint cameraIndex;
uint time;
} pushConsts;
layout(binding = 0) uniform UniformBufferCamera {
CameraData cameras[MAX_CAMERAS];
uint cameraCount;
uint cameraMax;
} cam;
layout(binding = 1) uniform UniformBufferLight {
LightData lights[16];
vec4 ambientColor;
int activeLights; // number of valid entries in lights[]
} light;
// Global texture array; Material's *TextureIndex fields select entries.
layout(set=1, binding = 0) uniform sampler2D texSampler[32];
layout(set=2, binding = 0) uniform UniformBufferMat {
MaterialBank bank;
} materialBanks[1];
// Interpolated vertex-stage inputs and the single color output.
layout(location = 0) in vec4 inNormal;
layout(location = 1) in vec2 inUV0;
layout(location = 2) in vec2 inUV1;
layout(location = 3) in vec4 inWorldPos;
layout(location = 4) in flat uint materialId; // flat: one material per primitive
layout(location = 0) out vec4 outColor;
// Evaluates one light's Phong contribution for the current fragment.
// Derives ad-hoc Phong reflectivities from the PBR-style Material record,
// then computes ambient + Lambert diffuse + Phong specular.
// lightIndex: index into light.lights[]; position/norm: fragment position
// and shading normal; ambient/diffuse/spec: output terms.
void calculateLight(int lightIndex, vec3 position, vec3 norm, out vec3 ambient, out vec3 diffuse, out vec3 spec)
{
LightData thisLightSource = light.lights[lightIndex];
// Material lookup through the flat per-primitive materialId.
Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
LightInfo thisLight;
thisLight.Position = thisLightSource.pos.xyz;//Light Position in eye-coords
thisLight.La = light.ambientColor.rgb;//Ambient light intensity
thisLight.Ld = thisLightSource.color.rgb;//Diffuse light intensity
thisLight.Ls = thisLightSource.color.rgb;//Specular light intensity
MaterialInfo thisMaterial;
// Base color: factor alone, or texture * factor when a texture is assigned.
vec4 texColor = thisMaterialsource.baseColorFactor;
if (thisMaterialsource.colorTextureIndex > -1){ texColor = texture(texSampler[thisMaterialsource.colorTextureIndex], inUV0) * thisMaterialsource.baseColorFactor; }
// Metallic-roughness sample; reads G as roughness and B as metallic below
// (matches the glTF channel layout -- TODO confirm the textures follow it).
vec4 mrSample = vec4(1);
if (thisMaterialsource.metallicRoughnessTextureIndex > -1) { mrSample = texture(texSampler[thisMaterialsource.metallicRoughnessTextureIndex], inUV0); }
float perceptualRoughness = mrSample.g * thisMaterialsource.roughnessFactor;
float metallic = mrSample.b * thisMaterialsource.metallicFactor;
thisMaterial.Ka= texColor.rgb * (metallic+perceptualRoughness)/2;//Ambient reflectivity
thisMaterial.Kd= texColor.rgb * (perceptualRoughness);//Diffuse reflectivity
// NOTE(review): Ks goes negative whenever perceptualRoughness > metallic.
thisMaterial.Ks= texColor.rgb * (metallic-perceptualRoughness);//Specular reflectivity
// NOTE(review): Shininess can be 0; pow(0.0, 0.0) below is then undefined in GLSL.
thisMaterial.Shininess= (metallic);//Specular shininess factor
vec3 n = normalize(norm);
vec3 s = normalize(thisLight.Position - position); // fragment-to-light direction
vec3 v = normalize(-position); // fragment-to-viewer; assumes eye at the origin of this space -- TODO confirm
vec3 r = reflect(-s, n); // mirror reflection of the light direction
ambient = thisLight.La * thisMaterial.Ka;
// Attenuate ambient by the occlusion texture's red channel when present.
if (thisMaterialsource.ambientOcclusionTextureIndex > -1){
float ao = texture(texSampler[thisMaterialsource.ambientOcclusionTextureIndex], inUV0).r;
ambient = ambient * ao;
}
float sDotN = max(dot(s, n), 0.0);
diffuse = thisLight.Ld * thisMaterial.Kd * sDotN;
spec = thisLight.Ls * thisMaterial.Ks * pow(max(dot(r, v), 0.0), thisMaterial.Shininess);
}
// Fragment entry point: sums per-light Phong terms, then applies base
// color and emissive. Marks (A)/(B) show where moving a Material read
// crashed an HD 7750 -- the code itself is identical either way.
void main()
{
vec3 ambientSum = vec3(0);
vec3 diffuseSum = vec3(0);
vec3 specSum = vec3(0);
vec3 ambient, diffuse, spec;
// (A) - doesn't work when declared/set here <----------------------------------------
// Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
// Back faces get the negated normal; the two loops are otherwise identical.
if (gl_FrontFacing)
{
for (int i=0; i<light.activeLights; ++i)
{
calculateLight(i, inWorldPos.xyz, inNormal.xyz, ambient, diffuse, spec);
ambientSum += ambient;
diffuseSum += diffuse;
specSum += spec;
}
}
else
{
for (int i=0; i<light.activeLights; ++i)
{
calculateLight(i, inWorldPos.xyz, -inNormal.xyz, ambient, diffuse, spec);
ambientSum += ambient;
diffuseSum += diffuse;
specSum += spec;
}
}
// Average only the ambient contribution.
// NOTE(review): NaN when light.activeLights == 0 -- verify at least one light is active.
ambientSum /= light.activeLights;
// (B) - works when declared/set here <----------------------------------------
Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
// -1 in a *TextureIndex field means "no texture assigned".
vec4 texColor = thisMaterialsource.baseColorFactor;
if(thisMaterialsource.colorTextureIndex > -1){ texColor = texture(texSampler[thisMaterialsource.colorTextureIndex], inUV0) * thisMaterialsource.baseColorFactor; }
vec4 emissive = thisMaterialsource.emissiveFactor;
if (thisMaterialsource.unlitTextureIndex > -1) {
emissive = texture(texSampler[thisMaterialsource.unlitTextureIndex], inUV0) * thisMaterialsource.emissiveFactor;
}
outColor = vec4(ambientSum + diffuseSum, 1) * texColor + vec4(specSum, 1) + emissive;
}
Please excuse the quality of my shader code, I'm just experimenting and cobbling stuff together, and came upon this issue that, aside from being annoying to debug, completely blindsided me.
It's fixed now, but I'd like to know why it happened and honestly, unlike lots of other issues I've dealt with while learning, I don't even know where to start looking.
Is this simply a bug in the GPU/drivers, or a manifestation of some profound and arcane machinations that dictate how shaders work? How can I debug this sort of issue? Is there a way to see that it is going to fail, other than running it? I'd really like to know — I care much more about learning from this than just getting it to run.
If you don't mind going through one or two more power-off-on cycles, try the following simplification for your shader:
void main()
{
... variables
// (A) - doesn't work when declared/set here <----------------------------------------
// Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
/// Instead of doing two almost identical loops, extract the normal inversion
vec3 normal = (gl_FrontFacing ? inNormal : -inNormal).xyz;
for (int i=0; i<light.activeLights; ++i)
{
calculateLight(i, inWorldPos.xyz, normal, ambient, diffuse, spec);
ambientSum += ambient;
diffuseSum += diffuse;
specSum += spec;
}
ambientSum /= light.activeLights;
// (B) - works when declared/set here <----------------------------------------
Material thisMaterialsource = materialBanks[0].bank.materials[materialId];
... all the rest
}
A wild guess might be the following: the HD7750 is relatively old and two loops over (dynamic number) of active lights can generate too much shader bytecode, if the GLSL compiler does something strange. So you get an overflow of available thread memory. The GTX 1070 is obviously much more powerful and would not suffer from an "abuse" like this.
Other than this, the shader should be fine, and the above change is still a workaround, not a must. We have encountered strange behavior (i.e., something not working, contrary to the spec) of GLSL even on newer Radeons, but it was not similar to your problem.
I met some problems while using GL_RG_EXT and GL_UNSIGNED_BYTE. Related code:
// Plain holder for a GL texture / framebuffer pair plus the parameters it
// was allocated with. Cleanup is explicit via release() -- there is no
// destructor teardown, so instances can be copied freely.
class TextureBuffer {
public:
    GLuint texture{0};      // GL texture object name (0 = none)
    GLuint frameBuffer{0};  // GL framebuffer object name (0 = none)
    GLenum internalformat;  // internal format passed to glTexImage2D
    GLenum format;          // pixel-transfer format
    GLenum type;            // pixel-transfer type
    int w, h;               // texture dimensions in pixels

    TextureBuffer() = default;

    // Deletes whichever GL objects exist and zeroes the names, so calling
    // release() more than once is harmless.
    void release() {
        if (texture != 0) {
            glDeleteTextures(1, &texture);
            texture = 0;
        }
        if (frameBuffer != 0) {
            glDeleteFramebuffers(1, &frameBuffer);
            frameBuffer = 0;
        }
    }
};
TextureBuffer _maskTexture;
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
// Allocates a w x h texture with the requested formats and attaches it as
// the color attachment of a freshly created framebuffer, recording all
// parameters in `tb`.
//
// internalformat/format/type: forwarded to glTexImage2D (on ES 2.0 the
//   internal format must match `format`).
// tb:           receives the texture/FBO names plus the allocation parameters.
// w, h:         texture size in pixels.
// linearInterp: GL_LINEAR filtering when true, GL_NEAREST otherwise.
//
// Leaves tb.frameBuffer bound on GL_FRAMEBUFFER. An incomplete status is
// logged via printf -- this is where a non-color-renderable combination
// (e.g. GL_RG_EXT + GL_UNSIGNED_BYTE on some drivers) shows up.
void SharpenGPU::generateRenderToTexture(GLint internalformat, GLenum format, GLenum type,
TextureBuffer &tb, int w, int h, bool linearInterp)
{
glGenTextures(1, &tb.texture);
glBindTexture(GL_TEXTURE_2D, tb.texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// No mipmaps are ever allocated, so the min filter must be non-mipmapped.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, linearInterp ? GL_LINEAR : GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, internalformat, w, h, 0, format, type, NULL);
glGenFramebuffers(1, &tb.frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, tb.frameBuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tb.texture, 0);
// BUGFIX: the completeness check and the clear must run AFTER the color
// attachment is in place. The original called glClear on a framebuffer
// with no attachments, which is incomplete -- glClear then raises
// GL_INVALID_FRAMEBUFFER_OPERATION and does nothing.
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
printf("Framebuffer status: %x\n", (int)status);
glClear(_glClearBits);
tb.internalformat = internalformat;
tb.format = format;
tb.type = type;
tb.w = w;
tb.h = h;
}
When I use the following code to define _maskTexture,
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_HALF_FLOAT_OES, _maskTexture, _imageWidth, _imageHeight, false);
the code goes well. But if I use the lines below to define the _maskTexture, an error appears,
generateRenderToTexture(GL_RG_EXT, GL_RG_EXT, GL_UNSIGNED_BYTE, _maskTexture, _imageWidth, _imageHeight, false);
Error:
ProgramInfoLog: Validation Failed: Fragment program failed to compile with current context state.
Validation Failed: Vertex program failed to compile with current context state.
I'm really puzzled about it.
I know that the error is caused by compiling the vertex and fragment shaders. Here is the shader function I found to cause the error:
{
// Builds the "findP2SpeedUpforS7" fragment program: per pixel it compares
// the centre patch distance (DistanceCenter.x) against the four neighbour
// distances packed in Distance4d (x=left, y=up, z=right, w=down) and
// outputs the offset of the smallest, packed with It and It-Bt.
// NOTE(review): the GLSL reads a uniform `imageWH` that is not declared in
// this string -- presumably FRAGMENT_SHADER_SOURCE prepends shared
// declarations; confirm, since the source would not compile standalone.
// No comments are added inside the macro argument below: the text is
// stringized into the shader source, so edits there change runtime data.
const GLchar fShaderText[] = FRAGMENT_SHADER_SOURCE
(
uniform sampler2D Distance4d; // unsimilar with version 6, patch distance has been prepared.
uniform sampler2D DistanceCenter;
varying highp vec2 uv0;
void main()
{
highp float u_dx = 1./imageWH.x;
highp float v_dy = 1./imageWH.y;
highp vec2 ItBt = texture2D(DistanceCenter, uv0).yz;
highp vec2 direcXY;
highp float remainX = floor(mod(floor(imageWH.x * uv0.x + 0.6),2.) + 0.5); // 0 or 1
highp float remainY = floor(mod(floor(imageWH.y * uv0.y + 0.6),2.) + 0.5); // 0 or 1;
{
//center
highp float sum0 = texture2D(DistanceCenter, uv0).x;
highp float sumMin = sum0;
direcXY = vec2(0.,0.);
highp vec4 sum4d = texture2D(Distance4d, uv0);
//left
if(sum4d.x < sumMin)
{
sumMin = sum4d.x;
direcXY = vec2(-u_dx,0.);
}
//up
if(sum4d.y < sumMin)
{
sumMin = sum4d.y;
direcXY = vec2(0.,v_dy);
}
//right
if(sum4d.z < sumMin)
{
sumMin = sum4d.z;
direcXY = vec2(u_dx,0.);
}
//down
if(sum4d.w < sumMin) // when i disable this line, the error info will disappear
{
sumMin = sum4d.w;
direcXY = vec2(0.,-v_dy);
}
direcXY = (sumMin/sum0 > 0.7)? vec2(0.,0.):direcXY;// Section 4.1.1. thresholding. for that center position is preferred
}
gl_FragColor = vec4(ItBt.x, ItBt.x - ItBt.y, direcXY.x, direcXY.y);
//vec4(It, It - Bt, dx, dy);
}
);
// Store the program, then cache its uniform locations by name.
ProgramUniforms &pu = (_programs["findP2SpeedUpforS7"] = ProgramUniforms());
pu.program = compileShaders(gVertexShaderText, fShaderText);
pu.uniformMap["mvpMatrix"] = glGetUniformLocation(pu.program, "mvpMatrix");
pu.uniformMap["Distance4d"] = glGetUniformLocation(pu.program, "Distance4d");
pu.uniformMap["DistanceCenter"] = glGetUniformLocation(pu.program, "DistanceCenter");
pu.uniformMap["imageWH"] = glGetUniformLocation(pu.program, "imageWH");
}
I have marked out the related line causing the error.
Did someone meet a similar case?
Thanks.
I'm drawing particles using
glDrawElements(GL_POINTS, count, GL_UNSIGNED_SHORT, 0);
The vertex shader is very simple:
// Point vertex shader: transform the vertex and give every point a fixed
// 10-pixel raster size (when drawing GL_POINTS, the rasterized size is
// taken from gl_PointSize).
void main()
{
gl_Position = modelViewProjectionMatrix * position;
gl_PointSize = 10.0;
}
The fragment shader tries to use gl_PointCoord:
void main()
{
// Visualize the within-point coordinate: red = s, green = t.
// NOTE(review): gl_PointCoord reads as (0,0) -- i.e. black -- on contexts
// where point-sprite texturing is not enabled; the poster's fix was
// glEnable(GL_POINT_SPRITE_OES).
gl_FragColor = vec4(gl_PointCoord.s, gl_PointCoord.t, 0.0, 1.0);
}
But the points are always black, so gl_PointCoord is always (0.0, 0.0).
This is on OpenGL ES 2.0, tested on an iPhone 5 and an iPad 3.
Found it:
glEnable(GL_POINT_SPRITE_OES);
I'm learning OpenGLES20 on Android and have some questions. Hope you guys can help me out.
According to the Android Developers website, "...OpenGL ES allows you to define drawn objects using coordinates in three-dimensional space.". Is this and only this strictly true, or are they "hiding" the "w" coordinate? Normal space has 3 coordinates, but clip space has 4 (x, y, z, w), right?
Why the GLES20.glBindBuffer(arg0, arg1) function is only defined for int values if my triangle is defined in a float array?
Even if I don't include the code glBindBuffer(GL_ARRAY_BUFFER, myPositionBufferObject); my program still works. How can this work if I'm not binding my buffer to the OpenGL target GL_ARRAY_BUFFER?
This is the code for my Triangle. It's the super reduced code only necessary to render the simplest triangle.
public class Triangle {
    // Direct, native-order buffer backing the vertex data; GL reads it directly.
    ByteBuffer myByteBuffer;
    FloatBuffer positionBufferObject;
    int mProgram; // linked GL program handle

    /**
     * Builds the vertex buffer and compiles/links the shader program.
     * Must run on a thread with a current GL context.
     */
    public Triangle() {
        // Three vertices in clip space, four floats (x, y, z, w) each --
        // no matrix needed in the vertex shader.
        float vertexPositions[] = {
                0.75f, 0.75f, 0.0f, 1.0f,
                0.75f, -0.75f, 0.0f, 1.0f,
                -0.75f, -0.75f, 0.0f, 1.0f
        };
        myByteBuffer = ByteBuffer.allocateDirect(vertexPositions.length * 4); // 4 bytes per float
        myByteBuffer.order(ByteOrder.nativeOrder());
        positionBufferObject = myByteBuffer.asFloatBuffer();
        positionBufferObject.put(vertexPositions);
        positionBufferObject.position(0);
        String vertexShaderCode =
                "attribute vec4 vPosition;" +
                "void main() {" +
                " gl_Position = vPosition;" +
                "}";
        String fragmentShaderCode =
                "precision mediump float;" +
                "uniform vec4 vColor;" +
                "void main() {" +
                " gl_FragColor = vColor;" +
                "}";
        // BUGFIX: the original ignored compile status entirely, so a bad
        // shader only showed up later as a blank screen.
        int myVertexShader = compileShader(GLES20.GL_VERTEX_SHADER, vertexShaderCode);
        int myFragmentShader = compileShader(GLES20.GL_FRAGMENT_SHADER, fragmentShaderCode);
        mProgram = GLES20.glCreateProgram();
        GLES20.glAttachShader(mProgram, myVertexShader);
        GLES20.glAttachShader(mProgram, myFragmentShader);
        // BUGFIX: draw() hard-codes attribute location 0; bind vPosition to 0
        // before linking instead of relying on the driver's default choice.
        GLES20.glBindAttribLocation(mProgram, 0, "vPosition");
        GLES20.glLinkProgram(mProgram);
        // The linked program keeps its own copies; flag the shader objects
        // for deletion so they are not leaked.
        GLES20.glDeleteShader(myVertexShader);
        GLES20.glDeleteShader(myFragmentShader);
    }

    // Compiles one shader stage; throws with the driver's info log on failure.
    private static int compileShader(int type, String source) {
        int shader = GLES20.glCreateShader(type);
        GLES20.glShaderSource(shader, source);
        GLES20.glCompileShader(shader);
        int[] status = new int[1];
        GLES20.glGetShaderiv(shader, GLES20.GL_COMPILE_STATUS, status, 0);
        if (status[0] == 0) {
            String log = GLES20.glGetShaderInfoLog(shader);
            GLES20.glDeleteShader(shader);
            throw new RuntimeException("Shader compile failed: " + log);
        }
        return shader;
    }

    /** Draws the triangle; call from the GL thread with the context current. */
    public void draw() {
        GLES20.glUseProgram(mProgram);
        // BUGFIX: vColor was never set and uniforms default to all zeros, so
        // the triangle rendered black/invisible. Supply an opaque color.
        GLES20.glUniform4f(GLES20.glGetUniformLocation(mProgram, "vColor"),
                0.0f, 1.0f, 0.0f, 1.0f);
        GLES20.glEnableVertexAttribArray(0);
        // Client-side vertex array: 4 floats per vertex, tightly packed.
        GLES20.glVertexAttribPointer(0, 4, GLES20.GL_FLOAT, false, 0, positionBufferObject);
        GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0, 3);
        GLES20.glDisableVertexAttribArray(0); // don't leak enabled-array state
    }
}
Suppose I have some geometrical data and I wish to render it in wireframe mode. Obviously, this can be done using the API (for example, by setting some appropriate mode like D3DFILL_WIREFRAME in DirectX).
But I was interested if that is possible to achieve using vertex / geometry / pixel shaders (combined, probably).
Does someone have a sample of that?
Thank you.
Perhaps something like http://wn.com/DirectX_C++__Geometry_with_Wireframe_Effect ?
Try this: http://cgg-journal.com/2008-2/06/index.html -- code at the bottom
// ------------------ Vertex Shader --------------------------------
#version 120
#extension GL_EXT_gpu_shader4 : enable
// Pass-through: ftransform() reproduces the fixed-function
// modelview-projection transform (GLSL 1.20 / compatibility profile).
void main(void)
{
gl_Position = ftransform();
}
// ------------------ Geometry Shader --------------------------------
#version 120
#extension GL_EXT_gpu_shader4 : enable
// Single-pass wireframe: for each triangle vertex, compute its
// window-space distance to the opposite edge and emit it in `dist`.
// noperspective keeps the interpolated value a true screen-space distance.
uniform vec2 WIN_SCALE; // NDC-to-window scale; presumably half the viewport size, set by the app -- confirm
noperspective varying vec3 dist;
void main(void)
{
// Perspective-divide the three vertices into window space.
vec2 p0 = WIN_SCALE * gl_PositionIn[0].xy/gl_PositionIn[0].w;
vec2 p1 = WIN_SCALE * gl_PositionIn[1].xy/gl_PositionIn[1].w;
vec2 p2 = WIN_SCALE * gl_PositionIn[2].xy/gl_PositionIn[2].w;
// v0 is the edge opposite vertex 0, v1 opposite vertex 1, v2 opposite vertex 2.
vec2 v0 = p2-p1;
vec2 v1 = p2-p0;
vec2 v2 = p1-p0;
// |v1 x v2| = twice the triangle's window-space area.
float area = abs(v1.x*v2.y - v1.y * v2.x);
// area / |edge| is the triangle's height over that edge, i.e. the distance
// from each vertex to its opposite edge; the other two components are 0.
dist = vec3(area/length(v0),0,0);
gl_Position = gl_PositionIn[0];
EmitVertex();
dist = vec3(0,area/length(v1),0);
gl_Position = gl_PositionIn[1];
EmitVertex();
dist = vec3(0,0,area/length(v2));
gl_Position = gl_PositionIn[2];
EmitVertex();
EndPrimitive();
}
// ------------------ Fragment Shader --------------------------------
#version 120
#extension GL_EXT_gpu_shader4 : enable
noperspective varying vec3 dist; // screen-space distances to the three triangle edges
const vec4 WIRE_COL = vec4(1.0,0.0,0.0,1); // edge (wire) color
const vec4 FILL_COL = vec4(1,1,1,1);       // interior fill color
void main(void)
{
// Distance to the nearest of the three edges.
float d = min(dist[0],min(dist[1],dist[2]));
// Gaussian-like falloff: 1 on the edge, decaying within a few pixels.
float I = exp2(-2*d*d);
gl_FragColor = I*WIRE_COL + (1.0 - I)*FILL_COL;
}