I have a three.js scene made with Rogue Engine, which I'm using to build a VR experience.
In it I'm using a fairly complex shader. It takes the world-space locations of two locators to transition between the material's normal shading and a flat color; the transition uses noise for effect (see the video below — it shows the effect of the first locator, but the second one is similar, sweeping from bottom to top).
The locators' positions are passed as vec3 uniforms, and the shader itself is injected into a MeshStandardMaterial using onBeforeCompile.
Performance is already poor and really tanks when textures are involved: I'm using three texture sets for the scene (diffuse, roughness, metalness, emission, and AO), so each map is sampled three times and then masked using vertex colors (not shown in the code below).
// Varyings interpolated from the injected vertex shader.
varying vec3 W_Pos; // world-space position of the fragment
varying vec3 F_Nrml; // surface normal
varying vec3 camDir; // camera-facing direction
varying vec3 vertexColor; // vertex color, used as a texture-set mask
// First locator: drives the rim-colored "astral" transition.
uniform vec3 astral_locator; // First locator (world-space position)
uniform vec3 astral_spread; // the locator's scale, scaled up to grow the transition radius
uniform vec3 starScatter_starScale_nScale; // three float parameters packed as a vector for easier control in Rogue Engine
uniform vec3 breakPoints; // rim-factor thresholds for the three color bands
uniform vec3 c1; // rim band color 1
uniform vec3 c2; // rim band color 2
uniform vec3 c3; // rim band color 3
// Second locator: drives the bottom-to-top noisy dissolve.
uniform vec3 noise_locator; // Second locator (world-space position)
uniform vec3 nStretch_nScale_emSharp; // three floats packed as a vector: noise stretch, noise scale, emission sharpness
uniform vec3 emCol; // emission color at the dissolve edge
// Permutation-polynomial hash helpers (Gustavson-style noise utilities).
// mod289 keeps hash values in [0,289) without an integer modulo.
vec4 mod289(vec4 x){return x - floor(x * (1.0 / 289.0)) * 289.0;}
// perm: cheap pseudo-random permutation of x, used as the corner hash in noise().
vec4 perm(vec4 x){return mod289(((x * 34.0) + 1.0) * x);}
// rand2: 3D -> 3D hash via sin-dot products.
// NOTE(review): appears unused in this fragment — confirm it is referenced
// elsewhere in the injected chunks before removing.
vec3 rand2( vec3 p ) {
return fract(
sin(
vec3(dot(p,vec3(127.1,310.7,143.54)),dot(p,vec3(269.5,183.3,217.42)),dot(p,vec3(2459.5,133.3,17.42))))*43758.5453);
}
// Linear remap of `number` from [inMin,inMax] to [outMin,outMax]; no clamping,
// so results outside the output range are possible and relied upon by astral().
float mapping(float number, float inMin, float inMax, float outMin, float outMax){return (number - inMin) * (outMax - outMin) / (inMax - inMin) + outMin;}
// Layer three texture samples using the vertex-color channels as masks:
// start from black, then blend in map1 by R, map2 by G, map3 by B.
vec4 vertexMask(vec4 map1, vec4 map2, vec4 map3, vec3 vertMask){vec4 me1 = mix(vec4(0.0), map1,vertMask.r); vec4 me2 = mix(me1, map2,vertMask.g); vec4 me3 = mix(me2, map3,vertMask.b); return me3;}
//Noises
// Classic 3D value noise: hash the 8 corners of the unit lattice cell that
// contains p via perm(), then trilinearly interpolate with a smoothstep fade.
// Returns a value in roughly [0,1].
float noise(vec3 p){
vec3 a = floor(p); // lattice cell origin
vec3 d = p - a; // position inside the cell
d = d * d * (3.0 - 2.0 * d); // smoothstep fade curve
vec4 b = a.xxyy + vec4(0.0, 1.0, 0.0, 1.0); // x/y corner coordinates
vec4 k1 = perm(b.xyxy);
vec4 k2 = perm(k1.xyxy + b.zzww);
vec4 c = k2 + a.zzzz;
vec4 k3 = perm(c); // hashes at z
vec4 k4 = perm(c + 1.0); // hashes at z+1
vec4 o1 = fract(k3 * (1.0 / 41.0));
vec4 o2 = fract(k4 * (1.0 / 41.0));
vec4 o3 = o2 * d.z + o1 * (1.0 - d.z); // lerp along z
vec2 o4 = o3.yw * d.x + o3.xz * (1.0 - d.x); // lerp along x
return o4.y * d.y + o4.x * (1.0 - d.y); // lerp along y
}
// Fresnel-style rim factor in [0,1]: near 0 where the surface faces the
// camera, rising toward grazing angles (smoothstep window -0.75 .. 1.0).
float facing(){
vec3 n = normalize(F_Nrml);
vec3 toEye = normalize(-camDir);
float grazing = 1.0 - dot(n, toEye);
return clamp(smoothstep(-0.75, 1.0, grazing), 0.0, 1.0);
}
//Function for the second locator: a bottom-to-top noisy dissolve.
// Returns vec2(emission, transparency): emission is a bright band at the
// dissolve front; transparency fades the surface past the front.
vec2 noiseMove(vec3 loc,vec3 noiseDat){
float noise_stretch = noiseDat.x; // vertical stretch of the gradient
float noise_scale = noiseDat.y; // frequency of the noise breakup
float emission_sharp = noiseDat.z; // sharpness of the emissive edge
float noise_move = -loc.y; // the locator's height drives the sweep
float gen_Pattern;
float gen_Pattern_invert;
float emi_sharp_fac;
float transparency;
float emission;
// Vertical gradient offset by the locator, broken up by 3D value noise.
gen_Pattern = ((W_Pos.y+noise_move)*noise_stretch) + noise(W_Pos.xyz*noise_scale);
gen_Pattern_invert = 1.0 - gen_Pattern;
// Very steep ramp (x1000) isolates a narrow band around the gradient's front.
emi_sharp_fac = clamp(emission_sharp*1000.0,1.0,1000.0)*gen_Pattern;
emission = emission_sharp*gen_Pattern;
emission = 1.0 - emission;
emission = emission * emi_sharp_fac;
emission = clamp(emission,0.0,1.0);
transparency = clamp(gen_Pattern_invert,0.0,1.0);
return vec2(emission,transparency);
}
//Function for the first locator: a three-band rim color shown inside a
//noise-jittered sphere of influence around `loc`.
// Returns vec4(rgb = banded rim color, a = 1.0 inside the influence radius).
vec4 astral(vec3 loc, vec3 spr,vec3 cee1,vec3 cee2,vec3 cee3, vec3 breakks, vec3 star){//star is WIP
float f = facing(); // rim factor: ~0 facing the camera, ~1 at grazing angles
// Remap the rim factor against each break point to get three band weights
// (mapping() does not clamp, so weights can exceed [0,1] by design).
float re1 = mapping(f,breakks.x,1.0,0.0,1.0);
float re2 = mapping(f,breakks.y,1.0,0.0,1.0);
float re3 = mapping(f,breakks.z,1.0,0.0,1.0);
vec3 me1 = mix(vec3(0.,0.,0.),cee1,re1);
vec3 me2 = mix(me1,cee2,re2);
vec3 me3 = mix(me2,cee3,re3);
// Distance to the locator, jittered by scalar noise so the boundary is not a
// perfect sphere. NOTE(review): the same scalar is added to all three axes.
float dist = distance(W_Pos.xyz + (noise(W_Pos.xyz*star.z)-0.5),loc);
float val = step(dist,spr.x); // hard cutoff at the spread radius
return vec4(me3,val);
}
void main(){
// NOTE(review): outgoingLight and diffuseColor are provided by the
// surrounding three.js MeshStandardMaterial fragment shader — this chunk
// must be injected after the lighting computation in onBeforeCompile.
vec4 ast = astral(astral_locator,astral_spread,c1,c2,c3,breakPoints,starScatter_starScale_nScale);
vec2 noice = noiseMove(noise_locator,nStretch_nScale_emSharp);
// Layer the astral color inside its mask, then the emissive dissolve color.
vec3 outp = mix(mix(outgoingLight,ast.xyz,ast.w),emCol,noice.x); //Take output light from the three.js shader and mix it with the custom shader
float t = noice.y;
#ifdef NONSCIFI
t = 1.0 - noice.y; // the non-scifi variant dissolves in the opposite direction
#endif
t *= diffuseColor.a; // respect the material's base opacity
gl_FragColor = vec4(outp*t,t); // color premultiplied by the fade factor
}
Is there a way to optimize this better? A couple of things I can think of: baking the noise into a texture and sampling it instead of computing it every frame, and figuring out occlusion culling (a render pass doesn't work well in VR, so I can't store a depth pass — I need to find another way). Objects in the scene are already instanced to reduce draw calls. I assume making some objects static might help, including the locators, but I don't know whether that would stop their uniforms from updating every frame.
Is there anything else that can be done?
Also, I apologize for the structure of the question — I rarely need to post questions, thanks to Stack Overflow. :p
Related
I know there are many ways to apply an effect to an image, but I chose GPUImage (specifically GPUImageLookupFilter) for mine.
Here is the source code I use:
// Apply a color-grade LUT to sourceImage with GPUImageLookupFilter.
GPUImagePicture *sourceImagePic = [[GPUImagePicture alloc] initWithImage:sourceImage];
// FIX: Objective-C string literals use @"..."; the original had #"lookup.png",
// which does not compile (likely a markdown-escaping artifact).
GPUImagePicture *lookupImageSource = [[GPUImagePicture alloc] initWithImage:[UIImage imageNamed:@"lookup.png"]];
GPUImageLookupFilter *lookupImageFilter = [[GPUImageLookupFilter alloc] init];
// Input 0 = the image to grade; input 1 = the lookup-table texture.
[sourceImagePic addTarget:lookupImageFilter];
[lookupImageSource addTarget:lookupImageFilter];
// Retain the next rendered framebuffer so it can be read back as a UIImage.
[lookupImageFilter useNextFrameForImageCapture];
[sourceImagePic processImage];
[lookupImageSource processImage];
resultImage = [lookupImageFilter imageFromCurrentFramebufferWithOrientation:UIImageOrientationUp];
return resultImage;
At first I used an 8×8-tile lookup image (512×512 pixels, i.e. 512²).
But when I work with an array of images (thumbnails in a video, or many images chosen from the library), memory usage grows too high.
I think that if I use a smaller lookup image (4×4 tiles) the memory can be reduced, so I made a 4×4-tile lookup image (64×64 pixels, i.e. 16² per tile).
I tried to edit the code of GPUImageLookupFilter, but it doesn't work.
// GPUImageLookupFilter fragment shader for an 8x8-tile, 512x512 LUT.
// The blue channel selects one of 64 tiles (blue slices); red/green index a
// texel within the 64x64 tile; the two nearest blue tiles are blended.
void main(){
highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
highp float blueColor = textureColor.b * 63.0; // 64 blue levels -> tile index
highp vec2 quad1; // tile holding floor(blueColor)
quad1.y = floor(floor(blueColor) / 8.0);
quad1.x = floor(blueColor) - (quad1.y * 8.0);
highp vec2 quad2; // tile holding ceil(blueColor)
quad2.y = floor(ceil(blueColor) / 8.0);
quad2.x = ceil(blueColor) - (quad2.y * 8.0);
// 0.125 = tile width in UV (1/8); 0.5/512 insets half a texel so bilinear
// filtering never bleeds into the neighboring tile.
highp vec2 texPos1;
texPos1.x = (quad1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r);
texPos1.y = (quad1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g);
highp vec2 texPos2;
texPos2.x = (quad2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r);
texPos2.y = (quad2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g);
lowp vec4 newColor1 = texture2D(inputImageTexture2, texPos1);
lowp vec4 newColor2 = texture2D(inputImageTexture2, texPos2);
// Blend the two tiles by the fractional blue index, then mix by intensity.
lowp vec4 newColor = mix(newColor1, newColor2, fract(blueColor));
gl_FragColor = mix(textureColor, vec4(newColor.rgb, textureColor.w), intensity);
}
Could you help me edit this code to work with a 4×4-tile lookup image?
Thank you!
The following algorithm will work for any size of the lookup texture. You just have to adapt tiles to the number of the tiles in the x and y direction, and colTexSize to the full size of the color lookup texture.
The algorithm is identical to the original algorithm, except the constant size values, which have been replaced by the variables tiles and colTexSize.
// Generalized LUT shader: works for any tile grid / LUT size.
// Set `tiles` to the tile counts in x/y and `colTexSize` to the LUT texture
// size in texels (4x4 tiles of 16x16 -> 64x64; original: 8x8 of 64 -> 512x512).
void main()
{
    vec2 tiles = vec2( 4.0, 4.0 ); // original texture vec2( 8.0, 8.0 )
    vec2 colTexSize = vec2( 64.0, 64.0 ); // FIX: missing semicolon in the original
    highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
    // Blue channel selects the tile; blend between the two nearest tiles.
    highp float blueColor = textureColor.b * ((tiles.x*tiles.y)-1.0);
    highp vec2 quad1;
    quad1.y = floor(floor(blueColor) / tiles.y);
    quad1.x = floor(blueColor) - (quad1.y * tiles.x);
    highp vec2 quad2;
    quad2.y = floor(ceil(blueColor) / tiles.y);
    quad2.x = ceil(blueColor) - (quad2.y * tiles.x);
    // Half-texel inset keeps bilinear sampling inside the tile.
    // FIX: the original 8x8 constant (0.125 - 1.0/512.0) generalizes to
    // (1.0/tiles - 1.0/colTexSize); the posted 1.0/(tiles - colTexSize)
    // evaluates to a small NEGATIVE value and breaks the lookup.
    highp vec2 texPos1;
    texPos1.x = (quad1.x / tiles.x) + 0.5/colTexSize.x + ((1.0/tiles.x - 1.0/colTexSize.x) * textureColor.r);
    texPos1.y = (quad1.y / tiles.y) + 0.5/colTexSize.y + ((1.0/tiles.y - 1.0/colTexSize.y) * textureColor.g);
    highp vec2 texPos2;
    texPos2.x = (quad2.x / tiles.x) + 0.5/colTexSize.x + ((1.0/tiles.x - 1.0/colTexSize.x) * textureColor.r);
    texPos2.y = (quad2.y / tiles.y) + 0.5/colTexSize.y + ((1.0/tiles.y - 1.0/colTexSize.y) * textureColor.g);
    lowp vec4 newColor1 = texture2D(inputImageTexture2, texPos1);
    lowp vec4 newColor2 = texture2D(inputImageTexture2, texPos2);
    lowp vec4 newColor = mix(newColor1, newColor2, fract(blueColor));
    gl_FragColor = mix(textureColor, vec4(newColor.rgb, textureColor.w), intensity);
}
I am trying to make a blur effect on my modified texture2d object but how do I sample the texture2d object without using the texture2D function?
For example, to make the sampler2d baseTexture blurry, I could sum up the samples with the following code:
void main(){
vec4 baseColor = texture2D( baseTexture, vUv);
vec4 sum = vec4( 0.0 );
sum += texture2D( baseTexture, vec2( vUv.x, vUv.y - 4.0 * v ) ) * 0.051;
sum += texture2D( baseTexture, vec2( vUv.x, vUv.y - 3.0 * v ) ) * 0.0918;
...etc
gl_FragColor = sum
}
How would I be able to sample from the baseColor instead of the baseTexture?
Thanks,
Jim
I am rewriting my application using the modern OpenGL (3.3+) in Jogl.
I am using all the conventional matrices, that is objectToWorld, WorldToCamera and CameraToClip (or model, view and projection)
I created a class for handling all the mouse movements as McKesson does in its "Learning modern 3d graphic programming" with a method to offset the camera target position:
// Pans the camera's target point by the mouse-drag delta (in screen pixels),
// rotated into world space via the inverse of the camera orientation.
// NOTE(review): the x10 per-pixel scale is arbitrary; the author later
// replaced it with a projection-aware proportion (see below).
private void offsetTargetPosition(MouseEvent mouseEvent){
Mat4 currMat = calcMatrix(); // current camera matrix
Quat orientation = currMat.toQuaternion();
Quat invOrientation = orientation.conjugate(); // inverse rotation for a unit quaternion
Vec2 current = new Vec2(mouseEvent.getX(), mouseEvent.getY());
Vec2 diff = current.minus(startDragMouseLoc); // drag delta since the last event
// x negated, presumably to match screen-vs-view axis orientation — TODO confirm
Vec3 worldOffset = invOrientation.mult(new Vec3(-diff.x*10, diff.y*10, 0.0f));
currView.setTargetPos(currView.getTargetPos().plus(worldOffset));
startDragMouseLoc = current; // next delta is relative to this event
}
calcMatrix() returns the camera matrix, the rest should be clear.
What I want is moving my object along with the mouse, right now mouse movement and object translation don't correspond, that is they are not linear, because I am dealing with different spaces I guess..
I learnt that if I want to apply a transformation T in space O, but related in space C, I should do the following with p as vertex:
C * (C * T * C^-1) * O * p
Should I do something similar?
I solved with a damn simple proportion...
// Map the pixel delta onto the orthographic view volume: the viewport spans
// 2 * 10000 * aspect world units in x and 2 * 10000 in y (see matrix below).
float x = (float) (10000 * 2 * EC_Main.viewer.getAspect() * diff.x / EC_Main.viewer.getWidth());
float y = (float) (10000 * 2 * diff.y / EC_Main.viewer.getHeight());
Vec3 worldOffset = invOrientation.mult(new Vec3(-x, y, 0.0f));
Taking into account my projection matrix
// Orthographic volume: +/-10000 * aspect in x, +/-10000 in y and z.
Mat4 orthographicMatrix = Jglm.orthographic(-10000.0f * (float) EC_Main.viewer.getAspect(), 10000.0f * (float) EC_Main.viewer.getAspect(),
-10000.0f, 10000.0f, -10000.0f, 10000.0f);
I've been playing with shaders with a toy called ShaderToy and trying to create a top-down view water effect for a 2D game based on the code (for the shader) from Jonas Wagner. You can easily copy/paste this code in ShaderToy and see the effect.
The shader looks cool in ShaderToy, but when I try to replicate the same in my code, something goes wrong, see image below:
http://ivan.org.es/temp/shader_problems.png
My vertex shader (I don't know what's the one used in ShaderToy):
uniform mat4 Projection; // orthographic projection matrix
attribute vec2 Position; // 2D vertex position
void main(){
// z = 0, w = 1: a flat 2D point transformed straight to clip space.
gl_Position = Projection*vec4(Position, 0.0, 1.0);
}
The Fragment shader:
precision lowp float;
// Scene constants for the top-down water effect.
vec3 sunDirection = normalize(vec3(0.0, -1.0, 0.0));
vec3 sunColor = vec3(1.0, 0.8, 0.7);
vec3 eye = vec3(0.0, 1.0, 0.0);
// Sum four differently scaled and scrolled taps of the noise texture to fake
// an animated water normal map.
// NOTE(review): iGlobalTime / iChannel0 are ShaderToy uniforms — the host
// engine must declare and feed them, and iChannel0 must use GL_REPEAT
// wrapping (the large, time-offset UVs rely on the texture tiling).
vec4 getNoise(vec2 uv){
vec2 uv0 = (uv/103.0)+vec2(iGlobalTime/17.0, iGlobalTime/29.0);
vec2 uv1 = uv/107.0-vec2(iGlobalTime/-19.0, iGlobalTime/31.0);
vec2 uv2 = uv/vec2(897.0, 983.0)+vec2(iGlobalTime/101.0, iGlobalTime/97.0);
vec2 uv3 = uv/vec2(991.0, 877.0)-vec2(iGlobalTime/109.0, iGlobalTime/-113.0);
vec4 noise = (texture2D(iChannel0, uv0)) +
(texture2D(iChannel0, uv1)) +
(texture2D(iChannel0, uv2)) +
(texture2D(iChannel0, uv3));
return noise*0.5-1.0; // recenter the [0,4] sum to roughly [-1,1]
}
// Accumulate Phong sun lighting into diffuseColor / specularColor.
// shiny = specular exponent; spec / diffuse = intensity multipliers.
void sunLight(const vec3 surfaceNormal, const vec3 eyeDirection, float shiny, float spec, float diffuse, inout vec3 diffuseColor, inout vec3 specularColor){
float lambert = max(dot(sunDirection, surfaceNormal), 0.0);
diffuseColor += lambert * sunColor * diffuse;
vec3 bounce = normalize(reflect(-sunDirection, surfaceNormal));
float alignment = max(0.0, dot(eyeDirection, bounce));
specularColor += pow(alignment, shiny) * sunColor * spec;
}
void main(){
// Screen-space UV scaled up so the noise octaves repeat across the view.
vec2 uv = gl_FragCoord.xy / iResolution.xy;
uv *= 100.0;
vec4 noise = getNoise(uv);
// Treat the noise as a height-field normal; y (up) is weighted less so the
// surface looks rippled rather than flat.
vec3 surfaceNormal = normalize(noise.xzy*vec3(2.0, 1.0, 2.0));
vec3 diffuse = vec3(0.3); // ambient-ish base diffuse
vec3 specular = vec3(0.0);
vec3 worldToEye = vec3(0.0, 1.0, 0.0);//eye-worldPosition; (top-down: straight up)
vec3 eyeDirection = normalize(worldToEye);
sunLight(surfaceNormal, eyeDirection, 100.0, 1.5, 0.5, diffuse, specular);
// Tint the accumulated lighting with a water-blue base color.
gl_FragColor = vec4((diffuse+specular+vec3(0.1))*vec3(0.3, 0.5, 0.9), 1.0);
}
Please notice that the fragment shader code is exactly the same in ShaderToy and in my engine, it seems to me like uv coords from gl_FragCoord are somehow wrong or a precision problem, because after a while the effect goes worse and worse. I'm using an orthographic projection, but it shouldn't have too much to do with this since I'm getting uv coordinates directly from screen.
Some insights on what's going on?
It turns out that I was loading my textures with
// These wrap settings were the culprit: CLAMP_TO_EDGE prevents the noise
// texture from tiling, so the shader's large/offset UVs keep sampling the
// edge texel. The shader expects GL_REPEAT (see the conclusion below).
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
The shader noise function was expecting GL_REPEAT instead.
This is my very first post here, but I've been banging my head against a wall trying to figure out this problem. The code below is the fragment shader for an incredibly simple OpenGL ES 2.0 app; the vertex shader is doing all the usual business. The problem is with the specular highlight: it appears to be computed on a per-vertex basis rather than per-fragment. If anyone could explain why this is happening, I would greatly appreciate it. Thanks.
By the way, I'm sorry for the inefficient code — I simply went back to the basics of lighting to try to track down this problem.
precision highp float;
// Light contribution terms for a single light source.
struct DirectionLight
{
vec4 Ambient;
vec4 Diffuse;
vec4 Specular;
};
// Surface material response terms; SpecularExponent = Phong shininess.
struct Material
{
vec4 Ambient;
vec4 Diffuse;
vec4 Specular;
float SpecularExponent;
};
const float c_zero = 0.0;
const float c_one = 1.0;
uniform DirectionLight u_directionLight;
uniform Material u_material;
uniform sampler2D u_texture;
uniform vec3 u_lightPosition; // light position (world space, per the distance math below)
uniform vec3 u_eyePosition; // camera position
varying vec3 v_position; // interpolated fragment position
varying vec3 v_normal; // interpolated normal (NOT renormalized here — see main)
varying vec2 v_texCoord;
void main()
{
    // Interpolated normals denormalize across a triangle — renormalize per
    // fragment, otherwise the lighting looks faceted.
    vec3 normal = normalize(v_normal);
    // Per-fragment light and view directions.
    vec3 lightVector = normalize(u_lightPosition - v_position);
    vec3 eyeVector = normalize(u_eyePosition - v_position);
    // FIX: the Blinn-Phong half vector must be built from the light and view
    // DIRECTIONS at this fragment. The original normalized
    // (u_eyePosition + u_lightPosition) — a sum of two uniform POSITIONS that
    // is constant for every fragment, which is why the specular highlight
    // appeared to be computed per-vertex.
    vec3 halfPlane = normalize(lightVector + eyeVector);
    // Simple quadratic distance attenuation.
    float distance = length(u_lightPosition - v_position);
    float attenuation = c_one / (c_one + (0.1 * distance) + (0.01 * distance * distance));
    // 0.1 floor keeps back faces from going fully black (as in the original).
    float ndotl = max(dot(normal, lightVector), 0.1);
    float ndoth = max(dot(normal, halfPlane), 0.1);
    vec4 ambient = u_directionLight.Ambient * u_material.Ambient;
    vec4 diffuse = ndotl * u_directionLight.Diffuse * u_material.Diffuse;
    vec4 specular = vec4(c_zero, c_zero, c_zero, c_zero);
    if (ndoth > c_zero)
    {
        specular = pow(ndoth, u_material.SpecularExponent) * u_directionLight.Specular * u_material.Specular;
    }
    vec4 totalLight = ambient + ((diffuse + specular) * attenuation);
    gl_FragColor = totalLight;// * (texture2D(u_texture, v_texCoord));
}
I think this could help you with Diffuse lighting http://www.learnopengles.com/android-lesson-two-ambient-and-diffuse-lighting/