Related
I am relatively new to GLSL.
I want to create a solar system model and use it as a wallpaper (using Shadertoy) — something like this — and while I have the planets moving correctly, I can't figure out how to do the helix paths that follow those planets.
Here is my code so far
uniform vec2 iResolution;
uniform float iTime;
#define pi 3.141592653589
// Soft-edged disc: returns 1. inside radius `rad` around `pos`, fading
// smoothly to 0. across a band of width `blur`.
float circ(vec2 uv, vec2 pos, float rad, float blur) {
    float dist = length(pos - uv) - rad; // signed distance to the circle edge
    return smoothstep(blur, 0., dist);
}
// Returns 1. when `uv` lies within `width` of the 2D segment from
// start.xy to end.xy, otherwise 0.
float line(vec2 uv, vec3 start, vec3 end, float width) {
    vec2 p = uv - start.xy;        // point relative to segment origin
    vec2 d = end.xy - start.xy;    // segment vector
    float l = length(d);
    d = normalize(d); //direction
    // project p onto the segment and clamp to its extent
    float t = clamp(dot(p, d), 0., l);
    // FIX: GLSL ES has no implicit int->float conversion, so the ternary
    // `? 1 : 0.` (int vs float) fails to compile on Shadertoy — use `1.`.
    return (length(p - d*t)) < width ? 1. : 0.;
}
// Placeholder planet trail: currently draws a straight segment of length
// `len` from start.xy along `direction` (a true helix would additionally
// offset the segment sideways by cos/sin of the orbital phase).
float helix(vec2 uv, vec3 start, vec3 direction, float width, float len, float angle) {
    float delta = iTime / angle; // orbital phase (reserved for the helix offset)
    vec2 p = uv - start.xy;
    // FIX: the parameter was named `length`, which shadows the built-in
    // length() function and makes the length(d) call below a compile
    // error — renamed to `len` (callers are unaffected; GLSL has no
    // named arguments).
    vec2 d = (normalize(direction) * len).xy;
    float l = length(d);
    d /= l;
    float t = clamp(dot(p, d), 0., l);
    // FIX: `1` (int) vs `0.` (float) in a ternary does not compile in
    // GLSL ES — use the float literal `1.`.
    return (length(p - d*t)) < width ? 1. : 0.;
}
// Rotates `point` by Euler angles angle = (rx, ry, rz) using one combined
// rotation matrix.
// NOTE(review): GLSL mat3 constructors fill COLUMN by column, so the three
// comma-separated "rows" written below actually become the matrix's
// columns — i.e. this applies the transpose of the matrix as laid out on
// screen. Confirm the intended rotation direction before reusing.
vec3 rotate(vec3 point, vec3 angle) {
mat3 rot = mat3(
cos(angle.y)*cos(angle.z), cos(angle.z)*sin(angle.x)*sin(angle.y)-cos(angle.x)*sin(angle.z), cos(angle.x)*cos(angle.z)*sin(angle.y)+sin(angle.x)*sin(angle.z),
cos(angle.y)*sin(angle.z), cos(angle.x)*cos(angle.z)+sin(angle.x)*sin(angle.y)*sin(angle.z), -cos(angle.z)*sin(angle.x)+cos(angle.x)*sin(angle.y)*sin(angle.z),
-sin(angle.y), cos(angle.y)*sin(angle.x), cos(angle.x)*cos(angle.y));
return rot * point;
}
// Entry point: draws a tilted solar system — a sun and four planets on
// circular orbits in the xy-plane, the whole scene rotated by rotVec,
// plus a trail line along the rotated z axis.
// NOTE(review): fragCoord / fragColor are not declared in this snippet;
// on Shadertoy they come from mainImage(out vec4 fragColor, in vec2 fragCoord).
void main() {
vec2 uv = fragCoord / iResolution.xy; // normalize pixel coords to [0,1]
float ratio = iResolution.x / iResolution.y;
uv -= .5; //center origin
uv.x = uv.x * ratio;//make screen square
uv /= .3;//zoom
// orbital phase per body; divisors approximate orbital periods in years
float planetA[5] = float[](0., iTime / 0.241, iTime / 0.6152, iTime, iTime / 1.8809);
// positions on circular orbits in the xy-plane, radius growing per planet
vec3 planets[5] = vec3[](
vec3(0.), // sun
vec3(cos(planetA[1]) * .4, sin(planetA[1]) * .4, 0.), // mercury
vec3(cos(planetA[2]) * .7, sin(planetA[2]) * .7, 0.), // venus
vec3(cos(planetA[3]), sin(planetA[3]), 0.), // earth
vec3(cos(planetA[4])*1.5, sin(planetA[4])*1.5, 0.)// mars
);
// per-body RGB colors
vec3 planetsC[5] = vec3[](
vec3(0.89, 0.9, 0.45), // sun
vec3(0.54, 0.57, 0.63), // mercury
vec3(0.9, 0.5, 0.2), // venus
vec3(0.2, 0.3, 0.8), // earth
vec3(0.8, 0.3, 0.2)// mars
);
vec3 rotVec = vec3(-pi/4, pi/4, 0.); // scene tilt (Euler angles)
fragColor = vec4(0.);
fragColor = mix(fragColor, vec4(1.), line(uv, vec3(0.), rotate(vec3(0., 0., 2.), rotVec), 0.01)); //sun trail
for (int i = 1; i < planets.length(); i++) {
planets[i] = rotate(planets[i], vec3(-pi/4., pi/4., 0.)); //rotate the planet
fragColor = mix(fragColor, vec4(planetsC[i], 1.), helix(uv, planets[i], rotate(vec3(0., 0., 2.), rotVec), 0.01, 2., planetA[i])); //planet trail
}
for (int i = 0; i < planets.length(); i++) { //draws the planets
fragColor = mix(fragColor, vec4(planetsC[i], 1.), circ(uv, planets[i].xy, 0.05, 0.01));
}
}
the helix function is currently only a modified version of the line method but i want it to curve around the suns trail.
Any advice and/or help would be appreciated, as I am still learning.
I have tried to convert the helix equation:
x = r * cos(t) y = r * sin(t) z = t but havent gotten it to work
Here's the method currently, although it only displays a straight line:
// Placeholder trail: draws a straight segment of length `len` from
// start.xy along `direction` — not yet an actual helix.
float helix(vec2 uv, vec3 start, vec3 direction, float width, float len, float angle) {
    float delta = iTime / angle; // orbital phase (unused so far)
    vec2 p = uv - start.xy;
    // FIX: the parameter was named `length`, shadowing the built-in
    // length() function, so the length(d) call below could not compile.
    // Renamed to `len`; GLSL has no named arguments, so callers keep working.
    vec2 d = (normalize(direction) * len).xy;
    float l = length(d);
    d /= l;
    float t = clamp(dot(p, d), 0., l);
    // FIX: GLSL ES forbids the mixed-type ternary `? 1 : 0.` — use `1.`.
    return (length(p - d*t)) < width ? 1. : 0.;
}
In the hair rendering slide developed by Sheuermann at ATI at GDC 2004, I found code like this:
// Kajiya-Kay style anisotropic strand specular (Scheuermann, GDC 2004).
// T: hair strand tangent, V: view vector, L: light vector,
// exponent: specular shininess. Returns the specular intensity.
float StrandSpecular (float3 T, float3 V, float3 L, float exponent)
{
    float3 H = normalize(L + V);            // half vector between light and view
    float dotTH = dot(T, H);
    float sinTH = sqrt(1.0 - dotTH*dotTH);  // sine of angle between tangent and H
    // dirAtten fades the highlight to zero as dot(T,H) goes from 0 to -1,
    // i.e. when H points "backwards" along the tangent — suppressing the
    // mirrored second lobe the sinTH term alone would produce.
    // FIX: reuse dotTH instead of recomputing dot(T, H).
    float dirAtten = smoothstep(-1.0, 0.0, dotTH);
    return dirAtten * pow(sinTH, exponent);
}
I truly have no idea of the value dirAtten mean, what does this exactly mean in this shading model?
I regard this dirAtten as one attenuation coeffecient and it controls the range of the lighting you can see.
I write this question because I don't understand a piece of code in an example provided by Cocos2D for iPhone:
// Builds the node-to-parent affine transform from the Box2D body's
// position and rotation, so the sprite tracks the physics body.
-(CGAffineTransform) nodeToParentTransform
{
b2Vec2 pos = body_->GetPosition(); // body position in physics (meter) space
float x = pos.x * PTM_RATIO; // points-to-meters ratio converts to screen points
float y = pos.y * PTM_RATIO;
if ( ignoreAnchorPointForPosition_ ) {
x += anchorPointInPoints_.x;
y += anchorPointInPoints_.y;
}
// Make matrix
float radians = body_->GetAngle();
float c = cosf(radians);
float s = sinf(radians);
// Offset the translation by the rotated, negated anchor point so the
// node rotates about its anchor rather than its lower-left origin:
// this is (c, s; -s, c) applied to (-anchor.x, -anchor.y).
if( ! CGPointEqualToPoint(anchorPointInPoints_, CGPointZero) ){
x += c*-anchorPointInPoints_.x + -s*-anchorPointInPoints_.y;
y += s*-anchorPointInPoints_.x + c*-anchorPointInPoints_.y;
}
// Rot, Translate Matrix
// CGAffineTransformMake(a, b, c, d, tx, ty): rotation by `radians`
// combined with translation (x, y).
transform_ = CGAffineTransformMake( c, s,
-s, c,
x, y );
return transform_;
}
It's in the PhysicsSprite.mm file.
Maybe it's because I'm very bad with space geometry, but if someone can explain me, I very appreciate.
Thanks a lot.
if( ! CGPointEqualToPoint(anchorPointInPoints_, CGPointZero) ){
x += c*-anchorPointInPoints_.x + -s*-anchorPointInPoints_.y;
y += s*-anchorPointInPoints_.x + c*-anchorPointInPoints_.y;
}
The above piece of code simply rotates the xy coordinate axes anticlockwise by $180-\theta$
degrees, and adds the new coordinates to the previous x,y coordinates that were obtained from the piece of code before the above mentioned lines.
http://en.wikipedia.org/wiki/Rotation_of_axes provides the rotation of axes formula
I want to create a struct which is like a CGPoint, but with 3 coordinates instead of 2.
I create it in the following way:
typedef struct {CGFloat x;CGFloat y;CGFloat z;} CG3Vector;
// Convenience constructor for CG3Vector, mirroring CGPointMake.
CG_INLINE CG3Vector CG3VectorMake(CGFloat x, CGFloat y, CGFloat z)
{
    // Aggregate-initialize in one step instead of member-by-member assignment.
    CG3Vector v = { x, y, z };
    return v;
}
It works fine. But I now want to improve this struct so that it has the constants like for CGPoint: CGPointZero
Also what is the way to introduce the limits for particular components of the struct, like it is for the CGSize, where components are never lower than 0?
Thanks.
You could create constants like this:
const CG3Vector CG3VectorZero = { 0, 0, 0 };
If you want limits, I suppose you can do some checking like this:
// Convenience constructor that wraps each component into [0, 360)
// (treating components as angles in degrees).
CG_INLINE CG3Vector CG3VectorMake(CGFloat x, CGFloat y, CGFloat z)
{
    // normalize the values
    x = fmod(x, 360);
    if (x < 0) x += 360;
    y = fmod(y, 360);
    if (y < 0) y += 360;
    z = fmod(z, 360);
    if (z < 0) z += 360;
    return (CG3Vector) { x, y, z };
}
I am having trouble displaying a raw YUV file that it is in NV12 format.
I can display a selected frame, however, it is still mainly in black and white with certain shades of pink and green.
Here is how my output looks like
Anyways, here is how my program works. (This is done in cocoa/objective-c, but I need your expert advice on program algorithm, not on syntax.)
Prior to program execution, the YUV file is stored in a binary file named "test.yuv". The file is in NV12 format, meaning the Y plan is stored first, then the UV plan is interlaced. My file extraction has no problem because I did a lot testings on it.
During initiation, I create a lookup table that will convert binary/8 bites/a byte/chars into respected Y, U, V float values
For the Y plane this is my code
// Builds a 256-entry table mapping a raw luma byte to a float in [0, 1].
-(void)createLookupTableY //creates a lookup table for converting a single byte into a float between 0 and 1
{
    // FIX: the string literal was written as #"..." (a paste/markup
    // artifact); Objective-C string literals use the @ prefix.
    NSLog(@"YUVFrame: createLookupTableY");
    lookupTableY = new float [256];
    for (int i = 0; i < 256; i++)
    {
        lookupTableY[i] = (float) i /255; // normalize byte to [0, 1]
        //NSLog(@"lookupTableY[%d]: %f",i,lookupTableY[i]);//prints out the value of each float
    }
}
The U Plane lookup table
// Builds a 256-entry table mapping a raw chroma-U byte to a float in
// [-0.436, 0.436] (the U range of full-swing YUV).
-(void)createLookupTableU //creates a lookup table for converting a single byte into a float between 0 and 1
{
    // FIX: the string literal was written as #"..." (a paste/markup
    // artifact); Objective-C string literals use the @ prefix.
    NSLog(@"YUVFrame: createLookupTableU");
    lookupTableU = new float [256];
    for (int i = 0; i < 256; i++)
    {
        lookupTableU[i] = -0.436 + (float) i / 255* (0.436*2);
        NSLog(@"lookupTableU[%d]: %f",i,lookupTableU[i]);//prints out the value of each float
    }
}
And the V look-up table
// Builds a 256-entry table mapping a raw chroma-V byte to a float in
// [-0.615, 0.615] (the V range of full-swing YUV).
-(void)createLookupTableV //creates a lookup table for converting a single byte into a float between 0 and 1
{
    // FIX: the string literal was written as #"..." (a paste/markup
    // artifact); Objective-C string literals use the @ prefix.
    NSLog(@"YUVFrame: createLookupTableV");
    lookupTableV = new float [256];
    for (int i = 0; i < 256; i++)
    {
        lookupTableV[i] = -0.615 + (float) i /255 * (0.615*2);
        NSLog(@"lookupTableV[%d]: %f",i,lookupTableV[i]);//prints out the value of each float
    }
}
after this point, I extract the Y & UV plan and store them into two buffers, yBuffer & uvBuffer
at this point, I attempt to convert the YUV data and stored it into a RGB buffer array called "frameImage"
// Converts the extracted NV12 planes (yBuffer: full-resolution luma;
// uvBuffer: interleaved U,V at half resolution in both axes) into packed
// R,G,B floats in frameImage, using the lookup tables built above.
-(void)sortAndConvert//sort the extracted frame data into an array of float
{
    // FIX: the string literal was written as #"..."; Objective-C string
    // literals use the @ prefix. Also removed the unused local pixelCounter.
    NSLog(@"YUVFrame: sortAndConvert");
    int frameImageCounter = 0;
    for (int y = 0; y < YUV_HEIGHT; y++)//traverse through the frame's height
    {
        for ( int x = 0; x < YUV_WIDTH; x++)//traverse through the frame's width
        {
            float Y = lookupTableY [yBuffer [y*YUV_WIDTH + x] ];
            // NV12 layout: one interleaved (U,V) byte pair per 2x2 block
            // of luma pixels, hence the y/2, x/2 addressing and the *2 stride.
            float U = lookupTableU [uvBuffer [ ((y / 2) * (YUV_WIDTH / 2) + (x/2)) * 2 ] ];
            float V = lookupTableV [uvBuffer [ ((y / 2) * (YUV_WIDTH / 2) + (x/2)) * 2 + 1] ];
            // full-swing YUV -> RGB conversion coefficients
            float RFormula = Y + 1.13983f * V;
            float GFormula = Y - 0.39465f * U - 0.58060f * V;
            float BFormula = Y + 2.03211f * U;
            // clamp each channel and pack interleaved R,G,B
            frameImage [frameImageCounter++] = [self clampValue:RFormula];
            frameImage [frameImageCounter++] = [self clampValue:GFormula];
            frameImage [frameImageCounter++] = [self clampValue:BFormula];
        }
    }
}
then I tried to draw the Image in OpenGL
// Uploads frameImage (packed RGB floats) as a 2D texture and draws it on
// a full-screen quad. The parameter x is currently unused.
-(void)drawFrame:(int )x
{
    GLuint texture;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, YUV_WIDTH, YUV_HEIGHT, 0, GL_RGB, GL_FLOAT, frameImage);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, texture);
    // NOTE(review): glRotatef accumulates into the current matrix, so the
    // 180-degree flip alternates every call unless the caller resets the
    // modelview matrix (e.g. glLoadIdentity) — confirm against the caller.
    glRotatef( 180.0f, 1.0f, 0.0f, 0.0f );
    glBegin( GL_QUADS );
    glTexCoord2d(0.0,0.0); glVertex2d(-1.0,-1.0);
    glTexCoord2d(1.0,0.0); glVertex2d(+1.0,-1.0);
    glTexCoord2d(1.0,1.0); glVertex2d(+1.0,+1.0);
    glTexCoord2d(0.0,1.0); glVertex2d(-1.0,+1.0);
    glEnd();
    glFlush();
    // FIX: a fresh texture name was generated on every frame and never
    // released, leaking GPU memory; delete it once the draw is flushed.
    glDeleteTextures(1, &texture);
}
so basically this my program in a nut shell. essentially i read the binary YUV files, stores all the data in a char array buffer. i then translate these values into their respected YUV float values.
This is where I think the error might be: in the Y lookup table I standardize the Y plane to [0,1], in the U plane I standardized the values between [-0.436,0.436], and in the V plane I standardized it between [-0.615,0.615]. I did this because those are the YUV value ranges according to Wikipedia.
And the YUV to RGB formula is the same formula from Wikipedia. I have also tried various other formulas, and this is the only formula that gives the rough outlook of the frame. Anyone might venture to guess to why my program is not correctly displaying the YUV frame data. I think it is something to do with my standardization technique, but it seems alright to me.
I have done a lot of testing, and I am 100% certain that the error is caused by my look-up table. I don't think my setting formulas are correct.
A note to everyone who is reading
this. For the longest time, my frame
was not displaying properly because I
was not able to extract the frame data
correctly.
When I first started to program, I was
under the impression that in a clip of
say 30 frames, all 30 Y planar datas
are first written in the data file,
followed then by UV plane datas.
What I found out through trial and
error was that for a YUV data file,
specifically NV12, the data file is
stored in the following fashion
Y(1) UV(1) Y(2) UV(2) ... ...
#nschmidt
I changed my code to what you suggested
float U = lookupTableU [uvBuffer [ (y * (YUV_WIDTH / 4) + (x/4)) * 2 ] ]
float V = lookupTableU [uvBuffer [ (y * (YUV_WIDTH / 4) + (x/4)) * 2 + 1] ]
and this is the result that i get
here is the print line from the console. i am print out the values for Y, U, V and
RGB value that are being translated and stored on in the frameImage array
YUV:[0.658824,-0.022227,-0.045824] RGBFinal:[0.606593,0.694201,0.613655]
YUV:[0.643137,-0.022227,-0.045824] RGBFinal:[0.590906,0.678514,0.597969]
YUV:[0.607843,-0.022227,-0.045824] RGBFinal:[0.555612,0.643220,0.562675]
YUV:[0.592157,-0.022227,-0.045824] RGBFinal:[0.539926,0.627534,0.546988]
YUV:[0.643137,0.025647,0.151941] RGBFinal:[0.816324,0.544799,0.695255]
YUV:[0.662745,0.025647,0.151941] RGBFinal:[0.835932,0.564406,0.714863]
YUV:[0.690196,0.025647,0.151941] RGBFinal:[0.863383,0.591857,0.742314]
Update July 13, 2009
The problem was finally solved thanks to the recommendation from nschmidt . It turns out that my YUV file was actually in YUV 4:1:1 format. I was originally told that it was in YUV NV12 format. Anyways, I would like to share my results with you.
Here is output
and my code for decode is as follows
// Per-pixel YUV -> RGB decode (runs inside the width/height loops), then
// texture upload and quad draw.
// NOTE(review): the author says the file turned out to be 4:1:1, but the
// (y/2, x/2) addressing below is the usual 4:2:0 planar pattern with the
// V plane stored UOffset bytes after U — verify against the file layout.
float Y = (float) yBuffer [y*YUV_WIDTH + x] ;
float U = (float) uvBuffer [ ((y / 2) * (YUV_WIDTH / 2) + (x/2)) ] ;
float V = (float) uvBuffer [ ((y / 2) * (YUV_WIDTH / 2) + (x/2)) + UOffset] ;
// BT.601 "studio swing" conversion: Y offset by 16, U/V centered at 128
float RFormula = (1.164*(Y-16) + (1.596* (V - 128) ));
float GFormula = ((1.164 * (Y - 16)) - (0.813 * ((V) - 128)) - (0.391 * ((U) - 128)));
float BFormula = ((1.164 * (Y - 16)) + (2.018 * ((U) - 128)));
// clamp each channel to [0,255] and pack interleaved R,G,B bytes
frameImage [frameImageCounter] = (unsigned char)( (int)[self clampValue:RFormula]);
frameImageCounter ++;
frameImage [frameImageCounter] = (unsigned char)((int)[self clampValue:GFormula]);
frameImageCounter++;
frameImage [frameImageCounter] = (unsigned char)((int) [self clampValue:BFormula]);
frameImageCounter++;
// Upload frameImage as an RGB byte texture with auto-generated mipmaps
// and draw it on a full-screen quad.
GLuint texture;
glGenTextures(1, &texture);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, YUV_WIDTH, YUV_HEIGHT, 0, GL_RGB, GL_UNSIGNED_BYTE, frameImage);
//glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_CLAMP_TO_EDGE_SGIS);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_CLAMP_TO_EDGE_SGIS);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
glRotatef( 180.0f, 1.0f, 0.0f, 0.0f ); // flip the image vertically
glBegin( GL_QUADS );
glTexCoord2d(0.0,0.0); glVertex2d(-1.0,-1.0);
glTexCoord2d(1.0,0.0); glVertex2d(+1.0,-1.0);
glTexCoord2d(1.0,1.0); glVertex2d(+1.0,+1.0);
glTexCoord2d(0.0,1.0); glVertex2d(-1.0,+1.0);
glEnd();
NSLog(#"YUVFrameView: drawRect complete");
glFlush();
essentially, I used NSData for the raw file extraction. stored in a char array buffer. For YUV to RGB conversion, I used the above formula, afterwards, I clamped the values to [0:255]. then I used a 2DTexture in OpenGL for display.
So if you are converting YUV to RGB in the future, use the formula from above. If are using the YUV to RGB conversion formula from the earlier post, then you need to display the texture in GL_Float from the values for RGB are clamped between [0:1]
Next try :)
I think your UV buffer is not interleaved. It looks like the U values come first and then the array of V values. Changing the lines to
unsigned int voffset = YUV_HEIGHT * YUV_WIDTH / 2;
float U = lookupTableU [uvBuffer [ y * (YUV_WIDTH / 2) + x/2] ];
float V = lookupTableV [uvBuffer [ voffset + y * (YUV_WIDTH / 2) + x/2] ];
might indicate if this is really the case.
I think you're addressing U and V values incorrectly. Rather than:
float U = lookupTableU [uvBuffer [ ((y / 2) * (x / 2) + (x/2)) * 2 ] ];
float V = lookupTableV [uvBuffer [ ((y / 2) * (x / 2) + (x/2)) * 2 + 1] ];
It should be something along the lines of
float U = lookupTableU [uvBuffer [ ((y / 2) * (YUV_WIDTH / 2) + (x/2)) * 2 ] ];
float V = lookupTableV [uvBuffer [ ((y / 2) * (YUV_WIDTH / 2) + (x/2)) * 2 + 1] ];
The picture looks like you have 4:1:1 format. You should change your lines to
float U = lookupTableU [uvBuffer [ (y * (YUV_WIDTH / 4) + (x/4)) * 2 ] ]
float V = lookupTableU [uvBuffer [ (y * (YUV_WIDTH / 4) + (x/4)) * 2 + 1] ]
Maybe you can post the result to see, what else is wrong. I find it always hard to think about it. It's much easier to approach this iteratively.