Vulkan line drawing not showing up - rendering

So I would like to render the bounding box of a selected object. I have one buffer storing the 8 corner points and another buffer storing the indices used to draw those points as a line strip forming a box. I captured a frame with RenderDoc and I can see the yellow bounding box in the capture.
So it looks like the values are correct and the box is being drawn, but I do not see it in the final render to the screen. Anyone have an idea what I might be missing here?
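For reference, this is roughly how I lay out the 16 line-strip indices for the box (a sketch assuming corners 0-3 on the bottom face and 4-7 on the top face; the exact ordering in my code may differ):
// 16 indices drawn with VK_PRIMITIVE_TOPOLOGY_LINE_STRIP and VK_INDEX_TYPE_UINT16.
// A single strip of 15 segments covers the 12 box edges by retracing a few of them.
const uint16_t boxIndices[16] = {
    0, 1, 2, 3, 0,       // bottom face, back to corner 0
    4, 5, 6, 7, 4,       // up one vertical edge, then around the top face
    5, 1, 2, 6, 7, 3     // remaining vertical edges (retraces 4-5, 1-2 and 6-7)
};
Here is the pipeline setup: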
VkDescriptorSetLayoutBinding cameraBind = vkinit::descriptorset_layout_binding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT, 0);
VkDescriptorSetLayoutCreateInfo setinfo = {};
setinfo.bindingCount = 1;
setinfo.flags = 0;
setinfo.pNext = nullptr;
setinfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
setinfo.pBindings = &cameraBind;
vkCreateDescriptorSetLayout(_device, &setinfo, nullptr, &_globalSetLayout);
//setup push constant
VkPushConstantRange push_constant;
push_constant.offset = 0;
push_constant.size = sizeof(glm::mat4);
push_constant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkPipelineLayoutCreateInfo mesh_pipeline_layout_info = vkinit::pipeline_layout_create_info();
mesh_pipeline_layout_info.setLayoutCount = 1;
mesh_pipeline_layout_info.pSetLayouts = &_globalSetLayout;
mesh_pipeline_layout_info.pPushConstantRanges = &push_constant;
mesh_pipeline_layout_info.pushConstantRangeCount = 1;
vkCreatePipelineLayout(_device, &mesh_pipeline_layout_info, nullptr, &_highlightPipelineLayout);
PipelineBuilder highlightPipelineBuilder;
VertexInputDescription description;
VkVertexInputBindingDescription mainBinding = {};
mainBinding.binding = 0;
mainBinding.stride = sizeof(glm::vec3);
mainBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
description.bindings.push_back(mainBinding);
//Position will be stored at Location 0
VkVertexInputAttributeDescription positionAttribute = {};
positionAttribute.binding = 0;
positionAttribute.location = 0;
positionAttribute.format = VK_FORMAT_R32G32B32_SFLOAT;
positionAttribute.offset = 0;
description.attributes.push_back(positionAttribute);
highlightPipelineBuilder._pipelineLayout = _highlightPipelineLayout;
highlightPipelineBuilder.vertexDescription = description;
highlightPipelineBuilder._inputAssembly = vkinit::input_assembly_create_info(VK_PRIMITIVE_TOPOLOGY_LINE_STRIP);
highlightPipelineBuilder._rasterizer = vkinit::rasterization_state_create_info(VK_POLYGON_MODE_LINE);
highlightPipelineBuilder._depthStencil = vkinit::depth_stencil_create_info(true, true, VK_COMPARE_OP_ALWAYS);
ShaderEffect* lineEffect = new ShaderEffect();
lineEffect->add_stage(_shaderCache.get_shader(shader_path("wk_highlight.vert.spv")), VK_SHADER_STAGE_VERTEX_BIT);
lineEffect->add_stage(_shaderCache.get_shader(shader_path("wk_highlight.frag.spv")), VK_SHADER_STAGE_FRAGMENT_BIT);
lineEffect->reflect_layout(_device, nullptr, 0);
highlightPipelineBuilder.setShaders(lineEffect);
_highlightPipeline = highlightPipelineBuilder.build_pipeline(_device, _renderPass);
And here is the drawing part:
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, _highlightPipeline);
uint32_t camera_data_offset = _dynamicData.push(_camera.matrices);
VkDescriptorBufferInfo camInfo = _dynamicData.source.get_info();
camInfo.range = sizeof(GPUCameraData);
camInfo.offset = camera_data_offset;
VkDescriptorSet cameraSet;
vkutil::DescriptorBuilder::begin(_descriptorLayoutCache, _dynamicDescriptorAllocator)
.bind_buffer(0, &camInfo, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
.build(cameraSet);
vkCmdPushConstants(cmd, _highlightPipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(glm::mat4), &_selectedMatrix);
VkDeviceSize offset = 0;
vkCmdBindVertexBuffers(cmd, 0, 1, &_highlightVertexBuffer._buffer, &offset);
vkCmdBindIndexBuffer(cmd, _highlightIndexBuffer._buffer, 0, VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, _highlightPipelineLayout, 0, 1, &cameraSet, 0, nullptr);
vkCmdDrawIndexed(cmd, 16, 1, 0, 0, 0);
And the shaders. The vertex shader:
#version 450
layout (location = 0) in vec3 vPosition;
layout(set = 0, binding = 0) uniform CameraBuffer{
mat4 view;
mat4 proj;
mat4 viewproj;
} cameraData;
layout(push_constant) uniform PushConstants {
mat4 matrix;
} pushConstants;
void main()
{
mat4 modelMatrix = pushConstants.matrix;
mat4 transformMatrix = (cameraData.viewproj * modelMatrix);
gl_Position = transformMatrix * vec4(vPosition, 1.0f);
}
And the fragment shader:
//glsl version 4.5
#version 450
//output write
layout (location = 0) out vec4 outFragColor;
void main()
{
outFragColor = vec4(1.0, 1.0, 0.0, 1.0);
}

Related

glReadPixels returns wrong values on a different screen

So I have a plugin that behaves differently on different machines. On my machine, I pass in an image I generate with 32 * 32 * 32 values, where within every run of 32 pixels the red value goes from 0 to 1. When I read it back I get the correct result: every 32 pixels, the red value goes from 0 to 1.
But with the same code and the same program on my co-worker's machine, the red value goes from 0 to 1 over 64 pixels instead of 32.
Is this problem related to screen resolution? How can I get back the correct values that I pass in?
Example (first 32 pixels of red values)
Passed in:
0, 8, 16, 24, 32, 40, ... 248
Correct (my machine):
0, 8, 16, 24, ... 248
Wrong (co-worker's machine):
0, 4, 8, 12, 16, ... 124
Here is my code to create the texture based on the bitmap:
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, _texID);
if (_texID) {
glDeleteTextures(1, &_texID);
_texID = 0;
}
glGenTextures(1, &_texID);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA8, (int)_imageSize.width, (int)_imageSize.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, _image_buffer);
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
Here is the code to generate the bitmap:
[self setFrameSize:_imageSize];
GLubyte al = 255; // alpha value
double increment = (al+1) / val;
_totalByteSize = val*val*val*4;
_image_buffer = new GLubyte[_totalByteSize];
for (int i = 0; i < (_totalByteSize); i++) {
_image_buffer[i] = 0;
}
vector<GLKVector3> data;
GLubyte bl = 0;
GLubyte gr = 0;
GLubyte re = 0;
for ( int b = 0; b < val; b++) {
gr = 0;
for ( int g = 0; g < val; g++) {
re = 0;
for ( int r = 0; r < val; r++) {
int ind = r + g * val + b * val * val;
_image_buffer[ind * 4 + 0] = re;
_image_buffer[ind * 4 + 1] = gr;
_image_buffer[ind * 4 + 2] = bl;
_image_buffer[ind * 4 + 3] = al;
re+= increment; // 256 / 32 = 8
}
gr+= increment; // 256 / 32 = 8
}
bl+= increment; // 256 / 32 = 8
}
And here is the code to read it back:
int totalByteSize = 32*32*32*3;
GLubyte* bitmap = new GLubyte[totalByteSize];
glReadPixels(0, 0, _imageSize.width, _imageSize.height, GL_RGB, GL_UNSIGNED_BYTE, bitmap);
[_lutData removeAllObjects];
for (int i = 0; i < totalByteSize / 3; i++) {
double val1 = (double)bitmap[i*3+0] / 256.0;
double val2 = (double)bitmap[i*3+1] / 256.0;
double val3 = (double)bitmap[i*3+2] / 256.0;
[_lutData addObject:@[@(val1), @(val2), @(val3)]];
}
Could this be caused by a high-resolution screen, or some other setting that makes it read back the wrong values?
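One check I still want to add (just an assumption on my side, this is not in the plugin yet): compare the size OpenGL is actually rendering into with _imageSize right before glReadPixels, since on a high-resolution screen the backing framebuffer can be larger than the size in points.
// Sanity check: is the framebuffer really _imageSize pixels, or is it scaled?
GLint viewport[4] = {0, 0, 0, 0};
glGetIntegerv(GL_VIEWPORT, viewport);
printf("viewport = %d x %d, _imageSize = %d x %d\n",
       viewport[2], viewport[3],
       (int)_imageSize.width, (int)_imageSize.height);
// If the viewport is twice _imageSize on the co-worker's machine, glReadPixels is
// reading from a scaled buffer and the 32-pixel steps would show up as 64.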

How to do batching without UBOs?

I'm trying to implement batching for a WebGL renderer which is struggling with lots of small objects due to too many draw calls. What I thought is I'd batch them all by the kind of shader they use, then draw a few at a time, uploading material parameters and the model matrix for each object once in uniforms.
My problem is that the uniform size limits for non-UBO uniforms are extremely low, as in 256 floats at a minimum. If my material uses, say, 8 floats, and you factor in the model matrix (another 16 floats, so roughly 24 floats per model), 256 / 24 leaves me barely enough uniforms to draw 10 models in a single batch, which isn't really going to be enough.
Is there any hope to make this work without UBOs? Are textures an option? How are people doing batching without WebGL2 UBOs?
More details: I have no skinning or complex animations. I just have some shaders (diffuse, Cook-Torrance, whatever), and each model has different material settings for each shader, e.g. color, roughness, index of refraction, which can be changed dynamically by the user (so it's not realistic to bake them into the vertex array, because we have some high-poly data; also, users can switch shaders, and not all shaders have the same number of parameters), as well as material maps, obviously. The geometry itself is static and just has a linear transform on each model. For the most part all the meshes are different, so geometry instancing won't help a whole lot, but I can look at that later.
Thanks
I don't know that this is actually faster than lots of draw calls, but here is drawing 4 models with a single draw call.
It works by adding an id per model. So, for every vertex in model #0 put a 0, for every vertex in model #1 put a 1, etc.
Then it uses the model id to index stuff in a texture. The easiest approach is for the model id to choose the row of a texture; then all the data for that model can be pulled out of that row.
For WebGL1
attribute float modelId;
...
#define TEXTURE_WIDTH ??
#define COLOR_OFFSET ((0.0 + 0.5) / TEXTURE_WIDTH)
#define MATERIAL_OFFSET ((1.0 + 0.5) / TEXTURE_WIDTH)
float modelOffset = (modelId + .5) / textureHeight;
vec4 color = texture2D(perModelData, vec2(COLOR_OFFSET, modelOffset));
vec4 roughnessIndexOfRefraction = texture2D(perModelData,
vec2(MATERIAL_OFFSET, modelOffset));
etc..
As long as you are not drawing more than gl.getParameter(gl.MAX_TEXTURE_SIZE) models it will work. If you have more than that, either use more draw calls or change the texture coordinate calculations so there's more than one model per row.
In WebGL2 you'd change the code to use texelFetch and unsigned integers
in uint modelId;
...
#define COLOR_OFFSET 0
#define MATERIAL_OFFSET 1
vec4 color = texelFetch(perModelData, ivec2(COLOR_OFFSET, modelId), 0);
vec4 roughnessIndexOfRefraction = texelFetch(perModelData,
ivec2(MATERIAL_OFFSET, modelId), 0);
Here is an example of 4 models drawn with 1 draw call. For each model, the model matrix and color are stored in the texture.
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('OES_texture_float');
if (!ext) {
alert('need OES_texture_float');
}
const COMMON_STUFF = `
#define TEXTURE_WIDTH 5.0
#define MATRIX_ROW_0_OFFSET ((0. + 0.5) / TEXTURE_WIDTH)
#define MATRIX_ROW_1_OFFSET ((1. + 0.5) / TEXTURE_WIDTH)
#define MATRIX_ROW_2_OFFSET ((2. + 0.5) / TEXTURE_WIDTH)
#define MATRIX_ROW_3_OFFSET ((3. + 0.5) / TEXTURE_WIDTH)
#define COLOR_OFFSET ((4. + 0.5) / TEXTURE_WIDTH)
`;
const vs = `
attribute vec4 position;
attribute vec3 normal;
attribute float modelId;
uniform float textureHeight;
uniform sampler2D perModelDataTexture;
uniform mat4 projection;
uniform mat4 view;
varying vec3 v_normal;
varying float v_modelId;
${COMMON_STUFF}
void main() {
v_modelId = modelId; // pass to fragment shader
float modelOffset = (modelId + 0.5) / textureHeight;
// note: in WebGL2 better to use texelFetch
mat4 model = mat4(
texture2D(perModelDataTexture, vec2(MATRIX_ROW_0_OFFSET, modelOffset)),
texture2D(perModelDataTexture, vec2(MATRIX_ROW_1_OFFSET, modelOffset)),
texture2D(perModelDataTexture, vec2(MATRIX_ROW_2_OFFSET, modelOffset)),
texture2D(perModelDataTexture, vec2(MATRIX_ROW_3_OFFSET, modelOffset)));
gl_Position = projection * view * model * position;
v_normal = mat3(view) * mat3(model) * normal;
}
`;
const fs = `
precision highp float;
varying vec3 v_normal;
varying float v_modelId;
uniform float textureHeight;
uniform sampler2D perModelDataTexture;
uniform vec3 lightDirection;
${COMMON_STUFF}
void main() {
float modelOffset = (v_modelId + 0.5) / textureHeight;
vec4 color = texture2D(perModelDataTexture, vec2(COLOR_OFFSET, modelOffset));
float l = dot(lightDirection, normalize(v_normal)) * .5 + .5;
gl_FragColor = vec4(color.rgb * l, color.a);
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
const modelVerts = [
twgl.primitives.createSphereVertices(1, 6, 4),
twgl.primitives.createCubeVertices(1, 1, 1),
twgl.primitives.createCylinderVertices(1, 1, 10, 1),
twgl.primitives.createTorusVertices(1, .2, 16, 8),
];
// merge all the vertices into one
const arrays = twgl.primitives.concatVertices(modelVerts);
// fill an array so each vertex of each model has a modelId
const modelIds = new Uint16Array(arrays.position.length / 3);
let offset = 0;
modelVerts.forEach((verts, modelId) => {
const end = offset + verts.position.length / 3;
while(offset < end) {
modelIds[offset++] = modelId;
}
});
arrays.modelId = { numComponents: 1, data: modelIds };
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const numModels = modelVerts.length;
const tex = gl.createTexture();
const textureWidth = 5; // 4x4 matrix, 4x1 color
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureWidth, numModels, 0, gl.RGBA, gl.FLOAT, null);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// this data is for the texture, one row per model
// first 4 pixels are the model matrix, 5 pixel is the color
const perModelData = new Float32Array(textureWidth * numModels * 4);
const stride = textureWidth * 4;
const modelOffset = 0;
const colorOffset = 16;
// set the colors at init time
for (let modelId = 0; modelId < numModels; ++modelId) {
perModelData.set([r(), r(), r(), 1], modelId * stride + colorOffset);
}
function r() {
return Math.random();
}
function render(time) {
time *= 0.001; // seconds
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
const fov = Math.PI * 0.25;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const near = 0.1;
const far = 20;
const projection = m4.perspective(fov, aspect, near, far);
const eye = [0, 0, 10];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
// set the matrix for each model in the texture data
const mat = m4.identity();
for (let modelId = 0; modelId < numModels; ++modelId) {
const t = time * (modelId + 1) * 0.3;
m4.identity(mat);
m4.rotateX(mat, t, mat);
m4.rotateY(mat, t, mat);
m4.translate(mat, [0, 0, Math.sin(t * 1.1) * 4], mat);
m4.rotateZ(mat, t, mat);
perModelData.set(mat, modelId * stride + modelOffset);
}
// upload the texture data
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, textureWidth, numModels,
gl.RGBA, gl.FLOAT, perModelData);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
lightDirection: v3.normalize([1, 2, 3]),
perModelDataTexture: tex,
textureHeight: numModels,
projection,
view,
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Here's 2000 models in one draw call
https://jsfiddle.net/greggman/g2tcadho/

Can I make the falling sand game run parallel on the GPU?

I've been trying to put together a falling sand type game in Metal, that uses a compute shader to do the work. My initial work looked ok, until I ran into the following problem:
In one update, both particles 1A and 1B could flow into space T.
Sequential
In a sequential loop, I could understand how I would simply move 1A into T, then when it came to evaluate 1B, I could see that the space was already filled.
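Roughly what I mean by the sequential version is a cell-by-cell pass over the grid, something like this (a CPU sketch with made-up names, not code I'm actually running):
#include <vector>

// Hypothetical CPU-side cell, mirroring the live/particleType fields of the kernel.
struct Cell { bool live = false; int type = 0; };
constexpr int SAND = 1;

// One sequential update, with "down" being increasing y as in the compute kernel.
// Because the grid is read and written in place, a cell that was filled earlier in
// the pass already reads as occupied when a later particle tries to move into it.
void updateSequential(std::vector<Cell>& cells, int width, int height) {
    for (int y = height - 2; y >= 0; --y) {      // process the lower rows first
        for (int x = 0; x < width; ++x) {
            Cell& c = cells[y * width + x];
            Cell& below = cells[(y + 1) * width + x];
            if (c.live && c.type == SAND && !below.live) {
                below = c;         // move the particle down
                c.live = false;    // and free its old cell
            }
        }
    }
}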
Parallel
My problem is that with the GPU, the checks for 1A and 1B run on different threads, so in one update loop they can both move to position T, essentially killing one of them.
My question is then, can I make a parallel solution to this, or can it only be solved sequentially?
Here's my calculation code, running a GPU thread for every pixel in a 640 by 480 grid:
struct Particle {
float4 color;
int live;
int particleType;
};
kernel void calculateFrame(
device Particle *src [[buffer(1)]],
device Particle *dst [[buffer(2)]],
uint2 threadgroup_position_in_grid [[ threadgroup_position_in_grid ]],
uint2 thread_position_in_threadgroup [[ thread_position_in_threadgroup ]],
uint2 threads_per_threadgroup [[ threads_per_threadgroup ]]) {
uint2 gid = threadgroup_position_in_grid;
int index = (gid.y * 640) + gid.x;
Particle particle = src[index];
Particle bottom = src[toLinear( gid + uint2( 0, 1))]; //bottom
Particle bottomLeft = src[toLinear( gid + uint2(-1, 1))]; //bottom left
Particle bottomRight = src[toLinear( gid + uint2( 1, 1))]; //bottom right
if (particle.live == 1 && particle.particleType == SAND) { // move down
if (bottom.live == 0) {
dst[index].live = 0;
dst[toLinear( gid + uint2( 0, 1))].particleType = particle.particleType;
dst[toLinear( gid + uint2( 0, 1))].color = particle.color;
dst[toLinear( gid + uint2( 0, 1))].live = 1;
}
else if (bottomLeft.live == 0) { // move down to the left if clear
dst[index].live = 0;
dst[toLinear( gid + uint2( -1, 1))].particleType = particle.particleType;
dst[toLinear( gid + uint2( -1, 1))].color = red;
dst[toLinear( gid + uint2( -1, 1))].live = 1;
}
else if (bottomRight.live == 0) { // move down to the right if clear
dst[index].live = 0;
dst[toLinear( gid + uint2( 1, 1))].particleType = particle.particleType;
dst[toLinear( gid + uint2( 1, 1))].color = red;
dst[toLinear( gid + uint2( 1, 1))].live = 1;
}
else { // cant move
dst[index] = src[index];
}
}
if (particle.particleType == WALL) {
dst[index] = particle;
dst[index].live = 1;
}
if (particle.particleType == ERASER) {
dst[index].color = blue;
dst[index].live = 0;
}
}

WebGL - moving the object in the line of sight

I defined my model-view matrix by writing a lookAt function that takes the eye of the camera, the position of the object that I'm representing, and the "up" vector of the camera. How can I move the object along the camera's line of sight? Any tips? If I define the vector that points from the eye of the camera to the position of the object (i.e. the line of sight), how can I use it to make the object move along that direction?
This is my lookAt function
function lookAt( eye, at, up )
{
if ( !Array.isArray(eye) || eye.length != 3) {
throw "lookAt(): first parameter [eye] must be an a vec3";
}
if ( !Array.isArray(at) || at.length != 3) {
throw "lookAt(): first parameter [at] must be an a vec3";
}
if ( !Array.isArray(up) || up.length != 3) {
throw "lookAt(): first parameter [up] must be an a vec3";
}
if ( equal(eye, at) ) {
return mat4();
}
var v = normalize( subtract(at, eye) ); // view direction vector
var n = normalize( cross(v, up) ); // perpendicular vector
var u = normalize( cross(n, v) ); // "new" up vector
v = negate( v );
var result = mat4(
vec4( n, -dot(n, eye) ),
vec4( u, -dot(u, eye) ),
vec4( v, -dot(v, eye) ),
vec4()
);
return result;
}
Honestly I don't understand your lookAt function. It's not setting a translation like most lookAt functions.
Here's a different lookAt function that generates a camera matrix, a matrix that positions the camera in the world. That's in contrast to a lookAt function that generates a view matrix, a matrix that moves everything in the world in front of the camera.
function lookAt(eye, target, up) {
const zAxis = v3.normalize(v3.subtract(eye, target));
const xAxis = v3.normalize(v3.cross(up, zAxis));
const yAxis = v3.normalize(v3.cross(zAxis, xAxis));
return [
...xAxis, 0,
...yAxis, 0,
...zAxis, 0,
...eye, 1,
];
}
Here's an article with some detail about a lookAt matrix.
I find camera matrices more useful than view matrices because a camera matrix (or a lookAt matrix) can be used to make heads look at other things. Gun turrets look at targets, eyes look at points of interest, whereas a view matrix can pretty much only be used for one thing. You can get one from the other by taking the inverse. But since a scene with turrets, eyes, and heads of characters tracking things might need 50+ lookAt matrices, it seems far more useful to generate that kind of matrix and take 1 inverse for the view matrix than to generate 50+ view matrices and have to invert all but 1 of them.
You can move any object relative to the way the camera is facing by taking an axis of the camera matrix and multiplying by some scalar. The xAxis will move left and right perpendicular to the camera, the yAxis up and down perpendicular to the camera, and the zAxis forward/backward in the direction the camera is facing.
The axes of the camera matrix are:
+----+----+----+----+
| xx | xy | xz | | xaxis
+----+----+----+----+
| yx | yy | yz | | yaxis
+----+----+----+----+
| zx | zy | zz | | zaxis
+----+----+----+----+
| tx | ty | tz | | translation
+----+----+----+----+
In other words
const camera = lookAt(eye, target, up);
const xaxis = camera.slice(0, 3);
const yaxis = camera.slice(4, 7);
const zaxis = camera.slice(8, 11);
Now you can translate forward or back with
matrix = mult(matrix, zaxis); // moves 1 unit away from camera
Multiply zaxis by the amount you want to move
moveVec = [zaxis[0] * moveAmount, zaxis[1] * moveAmount, zaxis[2] * moveAmount];
matrix = mult(matrix, moveVec); // moves moveAmount units away from camera
Or if you have your translation stored elsewhere just add the zaxis in
// assuming tx, ty, and tz are our translation
tx += zaxis[0] * moveAmount;
ty += zaxis[1] * moveAmount;
tz += zaxis[2] * moveAmount;
const vs = `
uniform mat4 u_worldViewProjection;
attribute vec4 position;
attribute vec2 texcoord;
varying vec4 v_position;
varying vec2 v_texcoord;
void main() {
v_texcoord = texcoord;
gl_Position = u_worldViewProjection * position;
}
`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_texture;
void main() {
gl_FragColor = texture2D(u_texture, v_texcoord);
}
`;
"use strict";
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.getElementById("c").getContext("webgl");
// compiles shaders, links program, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for positions, texcoords
const bufferInfo = twgl.primitives.createCubeBufferInfo(gl);
// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
const tex = twgl.createTexture(gl, {
min: gl.NEAREST,
mag: gl.NEAREST,
src: [
255, 64, 64, 255,
64, 192, 64, 255,
64, 64, 255, 255,
255, 224, 64, 255,
],
});
const settings = {
xoff: 0,
yoff: 0,
zoff: 0,
};
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
const fov = 45 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.01;
const zFar = 100;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [3, 4, -6];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const viewProjection = m4.multiply(projection, view);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const t = time * .1;
for (let z = -1; z <= 1; ++z) {
for (let x = -1; x <= 1; ++x) {
const world = m4.identity();
m4.translate(world, v3.mulScalar(camera.slice(0, 3), settings.xoff), world);
m4.translate(world, v3.mulScalar(camera.slice(4, 7), settings.yoff), world);
m4.translate(world, v3.mulScalar(camera.slice(8, 11), settings.zoff), world);
m4.translate(world, [x * 1.4, 0, z * 1.4], world);
m4.rotateY(world, t + z + x, world);
// calls gl.uniformXXX
twgl.setUniforms(programInfo, {
u_texture: tex,
u_worldViewProjection: m4.multiply(viewProjection, world),
});
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
}
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
setupSlider("#xSlider", "#xoff", "xoff");
setupSlider("#ySlider", "#yoff", "yoff");
setupSlider("#zSlider", "#zoff", "zoff");
function setupSlider(sliderId, labelId, property) {
const slider = document.querySelector(sliderId);
const label = document.querySelector(labelId);
function updateLabel() {
label.textContent = settings[property].toFixed(2);
}
slider.addEventListener('input', e => {
settings[property] = (parseInt(slider.value) / 100 * 2 - 1) * 5;
updateLabel();
});
updateLabel();
slider.value = (settings[property] / 5 * .5 + .5) * 100;
}
body { margin: 0; }
canvas { display: block; width: 100vw; height: 100vh; }
#ui {
position: absolute;
left: 10px;
top: 10px;
z-index: 2;
background: rgba(255, 255, 255, 0.9);
padding: .5em;
}
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas id="c"></canvas>
<div id="ui">
<div><input id="xSlider" type="range" min="0" max="100"/><label>xoff: <span id="xoff"></span></label></div>
<div><input id="ySlider" type="range" min="0" max="100"/><label>yoff: <span id="yoff"></span></label></div>
<div><input id="zSlider" type="range" min="0" max="100"/><label>zoff: <span id="zoff"></span></label></div>
</div>

DRI3 and Present extension in a client application

I am looking for someone who has experience/knowledge of how to implement the DRI3 and Present extensions in a client application.
My use case is:
I have a DMA buffer generated in my application and want to use the Present extension to render it to the screen.
Thanks Sharvil111 and Grey for the prompt response.
I found a Present test in xf86-intel-driver - test/present-test.c. I ported the code below into my app. I am getting a black screen at the end. I would expect that the created pixmap is empty, and therefore a black screen is expected.
Correct me if I am wrong.
static int test_dri3(Display *dpy)
{
Window win = DefaultRootWindow(dpy);
Pixmap pixmap;
Window root;
unsigned int width, height;
unsigned border, depth;
unsigned stride, size;
int x, y, ret = 1;
int device, handle;
int bpp;
device = dri3_open(dpy);
if (device < 0)
return 0;
if (!is_intel(device))
return 0;
printf("Opened Intel DRI3 device\n");
XGetGeometry(dpy, win, &root, &x, &y,
&width, &height, &border, &depth);
switch (depth) {
case 8: bpp = 8; break;
case 15: case 16: bpp = 16; break;
case 24: case 32: bpp = 32; break;
default: return 0;
}
stride = width * bpp/8;
size = PAGE_ALIGN(stride * height);
printf("Creating DRI3 %dx%d (source stride=%d, size=%d) for GTT\n",
width, height, stride, size);
pixmap = 0;
handle = gem_create(device, size);
if (handle) {
pixmap = dri3_create_pixmap(dpy, root,
width, height, depth,
gem_export(device, handle), bpp, stride, size);
gem_close(device, handle);
}
if (pixmap == 0)
goto fail;
xcb_present_pixmap(XGetXCBConnection(dpy),
win, pixmap,
0, /* sbc */
0, /* valid */
0, /* update */
0, /* x_off */
0, /* y_off */
None,
None, /* wait fence */
None,
XCB_PRESENT_OPTION_NONE,
0, /* target msc */
0, /* divisor */
0, /* remainder */
0, NULL);
XFreePixmap(dpy, pixmap);
XSync(dpy, True);
if (_x_error_occurred)
goto fail;
fail:
close(device);
return ret;
}