Generating Vertices in OpenGL ES 2.0 Vertex Shader

I am trying to draw a triangle with three vertices a, b, c. Typically, I would have these three vertex coordinates in an array and pass them as an attribute to my vertex shader.
But is it possible to generate these vertex coordinates within the vertex shader itself rather than passing them as an attribute (i.e., a per-vertex coordinate value for each corresponding vertex position)? If yes, is there a sample program demonstrating it?
Thanks.

You can create variables in the vertex shader and pass them to the fragment shader. An example:
Vertex shader
precision highp float;
uniform float u_time;
uniform float u_textureSize;
uniform mat4 u_mvpMatrix;
attribute vec4 a_position;
// This will be passed into the fragment shader.
varying vec2 v_textureCoordinate0;
void main()
{
    // Create the texture coordinate; it will be interpolated per fragment.
    v_textureCoordinate0 = a_position.xy / u_textureSize;
    gl_Position = u_mvpMatrix * a_position;
}
And the fragment shader:
precision mediump float;
uniform float u_time;
uniform float u_pixel_amount;
uniform sampler2D u_texture0;
// Interpolated texture coordinate per fragment.
varying vec2 v_textureCoordinate0;
void main(void)
{
    vec2 size = vec2(1.0 / u_pixel_amount, 1.0 / u_pixel_amount);
    vec2 uv = v_textureCoordinate0 - mod(v_textureCoordinate0, size);
    gl_FragColor = texture2D(u_texture0, uv);
    gl_FragColor.a = 1.0;
}
As you can see, the 2D vector v_textureCoordinate0 is created in the vertex shader and its interpolated value is used in the fragment shader.
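That said, if you want the vertex shader itself to produce the coordinates, note that OpenGL ES 2.0 has no gl_VertexID, so the shader cannot tell on its own which vertex it is processing. A common workaround is to upload only a per-vertex index and select the position from constants in the shader. A minimal sketch, assuming an attribute a_index that holds 0.0, 1.0 and 2.0 for the three vertices:
attribute float a_index; // only this tiny index is uploaded per vertex
void main()
{
    // Pick one of three hard-coded positions based on the index.
    vec2 pos = vec2(0.0, 0.5);                       // vertex c
    if (a_index < 0.5)      pos = vec2(-0.5, -0.5);  // vertex a
    else if (a_index < 1.5) pos = vec2( 0.5, -0.5);  // vertex b
    gl_Position = vec4(pos, 0.0, 1.0);
}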
I hope it helps you.

Related

Optimization for scene with custom shader

I have a three.js scene made with Rogue Engine, which I'm using to make a VR experience.
In that scene I'm using a fairly complex shader. It takes the world-space locations of two locators to transition between the material's normal shading and a flat color, and the transition uses noise for effect (see the video below; it shows the effect of the first locator, but the second one is similar, going bottom to top).
The locators' positions are passed as Vector3 uniforms, and I'm injecting the shader into a MeshStandardMaterial using onBeforeCompile.
The performance is already bad and really tanks when I'm using textures. I'm using three texture sets for the scene, each with diffuse, roughness, metalness, emission and AO maps, so each map is sampled three times and then masked using vertex colors (not present in the code below).
varying vec3 W_Pos; //world position vector
varying vec3 F_Nrml; //normal vector
varying vec3 camDir; // cam facing
varying vec3 vertexColor;
uniform vec3 astral_locator; // First locator
uniform vec3 astral_spread; // I pass the locator's scale here and scale it up for the transition
uniform vec3 starScatter_starScale_nScale; // three float parameters I'm passing as a vector for easier control in Rogue Engine
uniform vec3 breakPoints;
uniform vec3 c1;
uniform vec3 c2;
uniform vec3 c3;
uniform vec3 noise_locator; //Second locator
uniform vec3 nStretch_nScale_emSharp; // same as above, three floats passed as a vector
uniform vec3 emCol;
vec4 mod289(vec4 x){ return x - floor(x * (1.0 / 289.0)) * 289.0; }
vec4 perm(vec4 x){ return mod289(((x * 34.0) + 1.0) * x); }
vec3 rand2(vec3 p){
    return fract(sin(vec3(dot(p, vec3(127.1, 310.7, 143.54)),
                          dot(p, vec3(269.5, 183.3, 217.42)),
                          dot(p, vec3(2459.5, 133.3, 17.42)))) * 43758.5453);
}
float mapping(float number, float inMin, float inMax, float outMin, float outMax){
    return (number - inMin) * (outMax - outMin) / (inMax - inMin) + outMin;
}
vec4 vertexMask(vec4 map1, vec4 map2, vec4 map3, vec3 vertMask){
    vec4 me1 = mix(vec4(0.0), map1, vertMask.r);
    vec4 me2 = mix(me1, map2, vertMask.g);
    vec4 me3 = mix(me2, map3, vertMask.b);
    return me3;
}
//Noises
float noise(vec3 p){
    vec3 a = floor(p);
    vec3 d = p - a;
    d = d * d * (3.0 - 2.0 * d);
    vec4 b = a.xxyy + vec4(0.0, 1.0, 0.0, 1.0);
    vec4 k1 = perm(b.xyxy);
    vec4 k2 = perm(k1.xyxy + b.zzww);
    vec4 c = k2 + a.zzzz;
    vec4 k3 = perm(c);
    vec4 k4 = perm(c + 1.0);
    vec4 o1 = fract(k3 * (1.0 / 41.0));
    vec4 o2 = fract(k4 * (1.0 / 41.0));
    vec4 o3 = o2 * d.z + o1 * (1.0 - d.z);
    vec2 o4 = o3.yw * d.x + o3.xz * (1.0 - d.x);
    return o4.y * d.y + o4.x * (1.0 - d.y);
}
float facing(){
    vec3 nrml = F_Nrml;
    vec3 cam = camDir;
    vec3 normal = normalize(nrml.xyz);
    vec3 eye = normalize(-cam);
    float rim = smoothstep(-0.75, 1.0, 1.0 - dot(normal, eye));
    return clamp(rim, 0.0, 1.0);
}
//Function for the second locator
vec2 noiseMove(vec3 loc, vec3 noiseDat){
    float noise_stretch = noiseDat.x;
    float noise_scale = noiseDat.y;
    float emission_sharp = noiseDat.z;
    float noise_move = -loc.y;
    float gen_Pattern;
    float gen_Pattern_invert;
    float emi_sharp_fac;
    float transparency;
    float emission;
    gen_Pattern = ((W_Pos.y + noise_move) * noise_stretch) + noise(W_Pos.xyz * noise_scale);
    gen_Pattern_invert = 1.0 - gen_Pattern;
    emi_sharp_fac = clamp(emission_sharp * 1000.0, 1.0, 1000.0) * gen_Pattern;
    emission = emission_sharp * gen_Pattern;
    emission = 1.0 - emission;
    emission = emission * emi_sharp_fac;
    emission = clamp(emission, 0.0, 1.0);
    transparency = clamp(gen_Pattern_invert, 0.0, 1.0);
    return vec2(emission, transparency);
}
//Function for the first locator
vec4 astral(vec3 loc, vec3 spr, vec3 cee1, vec3 cee2, vec3 cee3, vec3 breakks, vec3 star){ //star is WIP
    float f = facing();
    float re1 = mapping(f, breakks.x, 1.0, 0.0, 1.0);
    float re2 = mapping(f, breakks.y, 1.0, 0.0, 1.0);
    float re3 = mapping(f, breakks.z, 1.0, 0.0, 1.0);
    vec3 me1 = mix(vec3(0.0, 0.0, 0.0), cee1, re1);
    vec3 me2 = mix(me1, cee2, re2);
    vec3 me3 = mix(me2, cee3, re3);
    float dist = distance(W_Pos.xyz + (noise(W_Pos.xyz * star.z) - 0.5), loc);
    float val = step(dist, spr.x);
    return vec4(me3, val);
}
void main(){
    vec4 ast = astral(astral_locator, astral_spread, c1, c2, c3, breakPoints, starScatter_starScale_nScale);
    vec2 noice = noiseMove(noise_locator, nStretch_nScale_emSharp);
    // Take the output light from the three.js shader and mix it with the custom shader.
    vec3 outp = mix(mix(outgoingLight, ast.xyz, ast.w), emCol, noice.x);
    float t = noice.y;
    #ifdef NONSCIFI
    t = 1.0 - noice.y;
    #endif
    t *= diffuseColor.a;
    gl_FragColor = vec4(outp * t, t);
}
Is there a way to optimize this better? A couple of things I can think of are storing the noise and reading it back instead of calculating it every frame (sketched below), and figuring out occlusion culling (a render pass doesn't work well in VR, so I can't store the depth pass; I've got to figure out another way). Objects in the scene are already instanced to reduce draw calls. I'm assuming making some objects static might help, including the locators, but I don't know if that will stop the uniforms from updating every frame.
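Something like this is what I have in mind for the noise part; u_noiseTex would be a new uniform holding the noise baked into a tiling texture (the name and setup here are hypothetical):
uniform sampler2D u_noiseTex; // hypothetical: noise() output baked into a tiling texture
float noiseBaked(vec3 p){
    // One texture fetch replaces the ALU-heavy procedural noise().
    // fract() keeps the lookup inside [0, 1) so the texture tiles;
    // note this collapses the 3D noise to 2D, which changes the look.
    return texture2D(u_noiseTex, fract(p.xy)).r;
}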
Is there anything else that can be done?
Also, I apologize for the structure of the question; I rarely post questions, thanks to Stack Overflow :p

shader value conversion error when passing value between vertex and fragment shader

I have the following vertex and fragment shaders.
Vertex:
#version 450
layout(location = 0) in vec2 Position;
layout(location = 1) in vec4 Color;
layout(location = 0) out vec2 fPosition;
void main()
{
    gl_Position = vec4(Position, 0, 1);
    fPosition = Position;
}
Fragment:
#version 450
layout(location = 0) in vec2 fPosition;
layout(location = 0) out vec4 fColor;
void main() {
    vec4 colors[4] = vec4[](
        vec4(1.0, 0.0, 0.0, 1.0),
        vec4(0.0, 1.0, 0.0, 1.0),
        vec4(0.0, 0.0, 1.0, 1.0),
        vec4(0.0, 0.0, 0.0, 1.0)
    );
    fColor = vec4(1.0);
    for(int row = 0; row < 2; row++) {
        for(int col = 0; col < 2; col++) {
            float dist = distance(fPosition, vec2(-0.50 + col, 0.50 - row));
            float delta = fwidth(dist);
            float alpha = smoothstep(0.45 - delta, 0.45, dist);
            fColor = mix(colors[row*2+col], fColor, alpha);
        }
    }
}
But when compiling this I am getting the following error:
cannot convert from ' gl_Position 4-component vector of float Position' to 'layout( location=0) smooth out highp 2-component vector of float'
And I have no clue how to fix it (this is my first time doing shader programming).
If additional information is needed, please let me know.
1. You do not need to specify layouts when passing variables between the vertex shader and the fragment shader. Remove the layout(location = 0) qualifier from fPosition in both the vertex and fragment shaders.
2. You only need to specify a layout when you are feeding variables (your position buffers) into the vertex shader through buffers. To add on, variables like positions, normals and texture coordinates must always pass through the vertex shader first and only then reach the fragment shader.
3. When exporting your final colour (fColor in your case) from the fragment shader, you do not need to give it a location; just declare it as out vec4 fColor; and OpenGL detects it automatically.
4. The error you got was telling you that you were binding a vec4 variable (fColor) to a location already occupied by a vec2 (fPosition). Note: in your vertex shader, attribute location 0 held the vertices you loaded, but you then tried to claim the same location for a vec4 later in the fragment shader. OpenGL does not automatically overwrite data like that.
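Applied to the vertex shader above, point 1 gives something like this (a minimal sketch; the fragment shader correspondingly declares in vec2 fPosition; without a layout qualifier):
#version 450
layout(location = 0) in vec2 Position;
layout(location = 1) in vec4 Color;
out vec2 fPosition; // no layout qualifier on the stage-to-stage varying
void main()
{
    gl_Position = vec4(Position, 0.0, 1.0);
    fPosition = Position;
}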

opengl es 2.0 - optimizing fragment shader

I am developing a game for Android/iOS and need to optimize the rendering.
The game enables the user to deform terrain, so I am using a grayscale image for the terrain (a value of 1 means solid ground, 0 means no ground) and applying a fragment shader to it (there is also a background image). This works very well at a constant 60 fps, but the problem is that I also need to render a border on the terrain's edge. To do so, I blur the edges when deforming, and in the fragment shader I draw the border based on the terrain's density/transparency (the border is a 1x64 texture).
The problem is that when rendering the border I need to do a dynamic texture read, which drops the frame rate to 20. Is there any way I could optimize this? If I replaced the border texture with a uniform float array, would it help, or would it be the same as reading from a 2D texture?
The shader code:
varying mediump vec2 frag_background_texcoord;
varying mediump vec2 frag_density_texcoord;
varying mediump vec2 frag_terrain_texcoord;
uniform sampler2D density_texture;
uniform sampler2D terrain_texture;
uniform sampler2D mix_texture;
uniform sampler2D background_texture;
void main()
{
    lowp vec4 background_color = texture2D(background_texture, frag_background_texcoord);
    lowp vec4 terrain_color = texture2D(terrain_texture, frag_terrain_texcoord);
    highp float density = texture2D(density_texture, frag_density_texcoord).a;
    if(density > 0.5)
    {
        // dynamic texture read (FPS drops to 20); would replacing this with a
        // uniform float array help (would also need to calculate the index in the array)?
        lowp vec4 mix_color = texture2D(mix_texture, vec2(density, 1.0));
        gl_FragColor = mix(terrain_color, mix_color, mix_color.a);
    }
    else
    {
        gl_FragColor = background_color;
    }
}
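What I mean is something like this (a hypothetical sketch, with border_colors standing in for the 1x64 border texture):
uniform lowp vec4 border_colors[64]; // hypothetical replacement for mix_texture
// ...
// Caveat: GLSL ES 1.00 fragment shaders only guarantee constant index
// expressions for array indexing, so this dynamic index may not compile
// on all ES 2.0 devices.
lowp vec4 mix_color = border_colors[int(density * 63.0)];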
Figured it out. The way I fixed it was to remove all branching. It runs at ~60 fps now.
The optimized code:
varying mediump vec2 frag_background_texcoord;
varying mediump vec2 frag_density_texcoord;
varying mediump vec2 frag_terrain_texcoord;
uniform sampler2D density_texture;
uniform sampler2D terrain_texture;
uniform sampler2D mix_texture;
uniform sampler2D background_texture;
void main()
{
    lowp vec4 background_color = texture2D(background_texture, frag_background_texcoord);
    lowp vec4 terrain_color = texture2D(terrain_texture, frag_terrain_texcoord);
    lowp float density = texture2D(density_texture, frag_density_texcoord).a;
    lowp vec4 mix_color = texture2D(mix_texture, vec2(density, 0.0));
    // Blend instead of branching: mix_color.r selects between background
    // and terrain, and mix_color.a lays the border color on top.
    gl_FragColor = mix(mix(background_color, terrain_color, mix_color.r), mix_color, mix_color.a);
}

Shader not showing up properly

I've been playing with shaders in a tool called ShaderToy, trying to create a top-down water effect for a 2D game based on the shader code from Jonas Wagner. You can easily copy/paste that code into ShaderToy and see the effect.
The shader looks cool in ShaderToy, but when I try to replicate it in my code, something goes wrong; see the image below:
http://ivan.org.es/temp/shader_problems.png
My vertex shader (I don't know which one ShaderToy uses):
uniform mat4 Projection;
attribute vec2 Position;
void main(){
    gl_Position = Projection*vec4(Position, 0.0, 1.0);
}
The Fragment shader:
precision lowp float;
vec3 sunDirection = normalize(vec3(0.0, -1.0, 0.0));
vec3 sunColor = vec3(1.0, 0.8, 0.7);
vec3 eye = vec3(0.0, 1.0, 0.0);
vec4 getNoise(vec2 uv){
    vec2 uv0 = (uv/103.0)+vec2(iGlobalTime/17.0, iGlobalTime/29.0);
    vec2 uv1 = uv/107.0-vec2(iGlobalTime/-19.0, iGlobalTime/31.0);
    vec2 uv2 = uv/vec2(897.0, 983.0)+vec2(iGlobalTime/101.0, iGlobalTime/97.0);
    vec2 uv3 = uv/vec2(991.0, 877.0)-vec2(iGlobalTime/109.0, iGlobalTime/-113.0);
    vec4 noise = (texture2D(iChannel0, uv0)) +
                 (texture2D(iChannel0, uv1)) +
                 (texture2D(iChannel0, uv2)) +
                 (texture2D(iChannel0, uv3));
    return noise*0.5-1.0;
}
void sunLight(const vec3 surfaceNormal, const vec3 eyeDirection, float shiny, float spec, float diffuse, inout vec3 diffuseColor, inout vec3 specularColor){
    vec3 reflection = normalize(reflect(-sunDirection, surfaceNormal));
    float direction = max(0.0, dot(eyeDirection, reflection));
    specularColor += pow(direction, shiny)*sunColor*spec;
    diffuseColor += max(dot(sunDirection, surfaceNormal),0.0)*sunColor*diffuse;
}
void main(){
    vec2 uv = gl_FragCoord.xy / iResolution.xy;
    uv *= 100.0;
    vec4 noise = getNoise(uv);
    vec3 surfaceNormal = normalize(noise.xzy*vec3(2.0, 1.0, 2.0));
    vec3 diffuse = vec3(0.3);
    vec3 specular = vec3(0.0);
    vec3 worldToEye = vec3(0.0, 1.0, 0.0); //eye-worldPosition
    vec3 eyeDirection = normalize(worldToEye);
    sunLight(surfaceNormal, eyeDirection, 100.0, 1.5, 0.5, diffuse, specular);
    gl_FragColor = vec4((diffuse+specular+vec3(0.1))*vec3(0.3, 0.5, 0.9), 1.0);
}
Please note that the fragment shader code is exactly the same in ShaderToy and in my engine. It seems to me like the uv coords from gl_FragCoord are somehow wrong, or there is a precision problem, because after a while the effect gets worse and worse. I'm using an orthographic projection, but that shouldn't matter much here, since I'm getting the uv coordinates directly from the screen.
Any insights into what's going on?
It turns out that I was loading my textures with
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
The shader noise function was expecting GL_REPEAT instead.
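That is, the textures should be loaded with the repeat wrap mode instead:
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);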

Why not vec3 for OpenGL ES 2.0 gl_Position?

I am new to OpenGL ES 2.0 and cannot understand the following very simple shader:
attribute vec4 vPosition;
void main()
{
    gl_Position = vPosition;
}
My question is, since a position would be a vector of (x, y, z), why is gl_Position a vec4 instead of vec3?
The w in vec4(x, y, z, w) is used for clipping and for the perspective division, and it is what allows 4x4 matrices to express translations and projections when transformations are applied to the position (homogeneous coordinates).
By default, it should be set to 1.0.
See here for some more info: http://web.archive.org/web/20160408103910/http://iphonedevelopment.blogspot.com/2010/11/opengl-es-20-for-iOS-chapter-4.html
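A quick illustration of why w matters during transformations: with w = 1.0 the translation part of a 4x4 matrix takes effect, while with w = 0.0 it is ignored (which is how directions, as opposed to points, are transformed). A minimal sketch:
// GLSL matrices are column-major: the last column holds the translation.
vec3 translate(vec3 p, vec3 t){
    mat4 T = mat4(1.0, 0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0, 0.0,
                  0.0, 0.0, 1.0, 0.0,
                  t.x, t.y, t.z, 1.0);
    vec4 q = T * vec4(p, 1.0); // w = 1.0, so the translation applies
    return q.xyz;              // q.xyz == p + t
}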
If you provide your vertices to the shader directly in clip space, you can just pass x, y, z and add 1.0 as the w component in the shader:
attribute vec3 vPosition; // vec3 instead of vec4
void main()
{
    gl_Position = vec4(vPosition, 1.0);
}