First of all, I'm new to Rust, so excuse any "not so great" implementations.
I'm building a game where I need to draw a texture by changing values in an RGBA vector that later gets turned into an image (it's for a raycaster game, if you're wondering). Inside the function below, I multiply the current pixel by a floating-point value called shade that darkens or brightens the pixel depending on the lighting. Since this gets called many times per frame and the profiler flags it as a hotspot, I'm trying to make it as fast as possible, given that the current implementation is not the best.
pub fn draw_texture(
    &mut self,
    texture: &Vec<u8>,
    texture_position: [usize; 2],
    pixel_position: [usize; 2],
    width_rect: usize,
    shade: f32,
    length: usize,
) {
    let pos = (texture_position[1] * length + texture_position[0]) << 2; // Position of current pixel
    (0..width_rect).for_each(|i| { // Draws in rectangles of size 1 x width_rect
        let mut pixel: [u8; 4] = texture[pos..pos + 4].try_into().unwrap(); // RGBA pixel
        if shade != 1.0 { // Draws shade depending on current lighting, darkening or brightening the pixel
            pixel[0] = (pixel[0] as f32 * shade) as u8;
            pixel[1] = (pixel[1] as f32 * shade) as u8;
            pixel[2] = (pixel[2] as f32 * shade) as u8;
        }
        if pixel[3] == 255 { // Doesn't draw transparent pixels
            self.draw_pixel([pixel_position[0] + i, pixel_position[1]], &pixel);
        }
    });
}

pub fn draw_pixel(&mut self, position: [usize; 2], pixel: &[u8; 4]) {
    let i = position[1] * self.width + position[0];
    if (i << 2) + 4 < self.img_arr_len {
        self.img_arr[(i << 2)..(i << 2) + 4].copy_from_slice(pixel);
    }
}
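One direction I'm considering (a minimal sketch, not a drop-in patch): since shade is constant for the whole call, a 256-entry lookup table built once per call would let the float multiply happen once per value instead of once per pixel.

// Build once per draw_texture call, not per pixel.
// table[v] == (v as f32 * shade) as u8; the float-to-int `as` cast saturates,
// so results are clamped to 0..=255 just like in the loop above.
fn build_shade_table(shade: f32) -> [u8; 256] {
    let mut table = [0u8; 256];
    for v in 0..256 {
        table[v] = (v as f32 * shade) as u8;
    }
    table
}

The shading in the loop then becomes three lookups (pixel[0] = table[pixel[0] as usize], and so on), which avoids the int-to-float-to-int round trip per channel.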
So I would like to render the bounding box of a selected object. I have a buffer to store the 8 points and another buffer with the indices for drawing those points as a line strip to form the box. I captured a frame with RenderDoc and I can see the yellow bounding box there.
So it looks like the values are correct and the box is being drawn, but I do not see it in the final render to the screen. Anyone have an idea what I might be missing here?
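For reference, the index layout I have in mind for the line strip (a sketch only: it assumes corners 0-3 form the bottom ring and 4-7 the top ring, with 4 above 0, and it matches the 16 indices used by the draw call below):

// One 16-index line strip that walks every edge of the box.
// A few vertical edges are traversed twice, which is harmless for line drawing.
const uint16_t boxLineStripIndices[16] = {
    0, 1, 2, 3, 0,    // bottom ring, closing back at corner 0
    4, 5, 1, 5, 6, 2, // top ring so far, dipping back down the verticals
    6, 7, 3, 7, 4     // finish the top ring and the remaining verticals
};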
VkDescriptorSetLayoutBinding cameraBind = vkinit::descriptorset_layout_binding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT, 0);
VkDescriptorSetLayoutCreateInfo setinfo = {};
setinfo.bindingCount = 1;
setinfo.flags = 0;
setinfo.pNext = nullptr;
setinfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
setinfo.pBindings = &cameraBind;
vkCreateDescriptorSetLayout(_device, &setinfo, nullptr, &_globalSetLayout);
//setup push constant
VkPushConstantRange push_constant;
push_constant.offset = 0;
push_constant.size = sizeof(glm::mat4);
push_constant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkPipelineLayoutCreateInfo mesh_pipeline_layout_info = vkinit::pipeline_layout_create_info();
mesh_pipeline_layout_info.setLayoutCount = 1;
mesh_pipeline_layout_info.pSetLayouts = &_globalSetLayout;
mesh_pipeline_layout_info.pPushConstantRanges = &push_constant;
mesh_pipeline_layout_info.pushConstantRangeCount = 1;
vkCreatePipelineLayout(_device, &mesh_pipeline_layout_info, nullptr, &_highlightPipelineLayout);
PipelineBuilder highlightPipelineBuilder;
VertexInputDescription description;
VkVertexInputBindingDescription mainBinding = {};
mainBinding.binding = 0;
mainBinding.stride = sizeof(glm::vec3);
mainBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
description.bindings.push_back(mainBinding);
//Position will be stored at Location 0
VkVertexInputAttributeDescription positionAttribute = {};
positionAttribute.binding = 0;
positionAttribute.location = 0;
positionAttribute.format = VK_FORMAT_R32G32B32_SFLOAT;
positionAttribute.offset = 0;
description.attributes.push_back(positionAttribute);
highlightPipelineBuilder._pipelineLayout = _highlightPipelineLayout;
highlightPipelineBuilder.vertexDescription = description;
highlightPipelineBuilder._inputAssembly = vkinit::input_assembly_create_info(VK_PRIMITIVE_TOPOLOGY_LINE_STRIP);
highlightPipelineBuilder._rasterizer = vkinit::rasterization_state_create_info(VK_POLYGON_MODE_LINE);
highlightPipelineBuilder._depthStencil = vkinit::depth_stencil_create_info(true, true, VK_COMPARE_OP_ALWAYS);
ShaderEffect* lineEffect = new ShaderEffect();
lineEffect->add_stage(_shaderCache.get_shader(shader_path("wk_highlight.vert.spv")), VK_SHADER_STAGE_VERTEX_BIT);
lineEffect->add_stage(_shaderCache.get_shader(shader_path("wk_highlight.frag.spv")), VK_SHADER_STAGE_FRAGMENT_BIT);
lineEffect->reflect_layout(_device, nullptr, 0);
highlightPipelineBuilder.setShaders(lineEffect);
_highlightPipeline = highlightPipelineBuilder.build_pipeline(_device, _renderPass);
And here is the drawing part:
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, _highlightPipeline);
uint32_t camera_data_offset = _dynamicData.push(_camera.matrices);
VkDescriptorBufferInfo camInfo = _dynamicData.source.get_info();
camInfo.range = sizeof(GPUCameraData);
camInfo.offset = camera_data_offset;
VkDescriptorSet cameraSet;
vkutil::DescriptorBuilder::begin(_descriptorLayoutCache, _dynamicDescriptorAllocator)
    .bind_buffer(0, &camInfo, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
    .build(cameraSet);
vkCmdPushConstants(cmd, _highlightPipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(glm::mat4), &_selectedMatrix);
VkDeviceSize offset = 0;
vkCmdBindVertexBuffers(cmd, 0, 1, &_highlightVertexBuffer._buffer, &offset);
vkCmdBindIndexBuffer(cmd, _highlightIndexBuffer._buffer, 0, VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, _highlightPipelineLayout, 0, 1, &cameraSet, 0, nullptr);
vkCmdDrawIndexed(cmd, 16, 1, 0, 0, 0);
And the shaders. First the vertex shader:
#version 450

layout (location = 0) in vec3 vPosition;

layout (set = 0, binding = 0) uniform CameraBuffer {
    mat4 view;
    mat4 proj;
    mat4 viewproj;
} cameraData;

layout (push_constant) uniform PushConstants {
    mat4 matrix;
} pushConstants;

void main()
{
    mat4 modelMatrix = pushConstants.matrix;
    mat4 transformMatrix = cameraData.viewproj * modelMatrix;
    gl_Position = transformMatrix * vec4(vPosition, 1.0f);
}
And the fragment shader:
// glsl version 4.5
#version 450

// output write
layout (location = 0) out vec4 outFragColor;

void main()
{
    outFragColor = vec4(1.0, 1.0, 0.0, 1.0);
}
I'm trying to implement batching for a WebGL renderer which is struggling with lots of small objects due to too many draw calls. What I thought I'd do is batch them all by the kind of shader they use, then draw a few at a time, uploading the material parameters and the model matrix for each object once in uniforms.
My problem is that the size limits for non-UBO uniforms are extremely low, as in 256 floats at a minimum. If my material uses, say, 8 floats, and you factor in the model matrix, I barely have enough uniforms to draw 10 models in a single batch, which isn't really going to be enough.
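(For concreteness: a 4x4 model matrix is 16 floats, so 16 + 8 = 24 floats per model, and 256 / 24 comes out to roughly 10 models per batch.)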
Is there any hope to make this work without UBOs? Are textures an option? How are people doing batching without WebGL2 UBOs?
More details: I have no skinning or complex animations, just some shaders (diffuse, Cook-Torrance, whatever), and each model has different material settings for each shader, e.g. color, roughness, index of refraction, as well as material maps. These can be changed dynamically by the user, so it's not realistic to bake them into the vertex array: we have some high-poly data, users can switch shaders, and not all shaders have the same number of parameters. The geometry itself is static and just has a linear transform on each model. For the most part all meshes are different, so geometry instancing won't help a whole lot, but I can look at that later.
Thanks
I don't know that this is actually faster than lots of draw calls, but here is a way to draw 4 models with a single draw call.
It works by adding an id per model: for every vertex in model #0 put a 0, for every vertex in model #1 put a 1, etc.
Then the model id is used to index data in a texture. The easiest scheme is to have the model id choose the row of a texture; all the data for that model can then be pulled out of that row.
For WebGL1
attribute float modelId;
...
#define TEXTURE_WIDTH ??
#define COLOR_OFFSET ((0.0 + 0.5) / TEXTURE_WIDTH)
#define MATERIAL_OFFSET ((1.0 + 0.5) / TEXTURE_WIDTH)

float modelOffset = (modelId + .5) / textureHeight;
vec4 color = texture2D(perModelData, vec2(COLOR_OFFSET, modelOffset));
vec4 roughnessIndexOfRefraction = texture2D(perModelData,
    vec2(MATERIAL_OFFSET, modelOffset));

etc..
As long as you are not drawing more than gl.getParameter(gl.MAX_TEXTURE_SIZE) models, it will work. If you have more than that, either use more draw calls or change the texture coordinate calculations so there's more than one model per row.
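For example, a sketch of that coordinate change, assuming a hypothetical packing of MODELS_PER_ROW models side by side, PIXELS_PER_MODEL pixels each (TEXTURE_WIDTH would then be MODELS_PER_ROW * PIXELS_PER_MODEL):

#define MODELS_PER_ROW 4.0
#define PIXELS_PER_MODEL 2.0  // here: one color pixel + one material pixel

float row = floor(modelId / MODELS_PER_ROW);
float col = mod(modelId, MODELS_PER_ROW) * PIXELS_PER_MODEL;
float modelOffset = (row + 0.5) / textureHeight;
vec4 color = texture2D(perModelData, vec2((col + 0.5) / TEXTURE_WIDTH, modelOffset));
vec4 roughnessIndexOfRefraction = texture2D(perModelData,
    vec2((col + 1.0 + 0.5) / TEXTURE_WIDTH, modelOffset));

Here textureHeight is the number of rows, i.e. ceil(numModels / MODELS_PER_ROW).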
In WebGL2 you'd change the code to use texelFetch and unsigned integers
in uint modelId;
...
#define COLOR_OFFSET 0
#define MATERIAL_OFFSET 1

// texelFetch takes integer texel coordinates (ivec2) and an explicit mip level
vec4 color = texelFetch(perModelData, ivec2(COLOR_OFFSET, int(modelId)), 0);
vec4 roughnessIndexOfRefraction = texelFetch(perModelData,
    ivec2(MATERIAL_OFFSET, int(modelId)), 0);
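In WebGL2 the texture allocation would also change, since float textures there require a sized internal format (a detail not shown in the snippet above):

gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA32F, textureWidth, numModels, 0,
    gl.RGBA, gl.FLOAT, null);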
Here's an example of 4 models drawn with 1 draw call. For each model, the model matrix and color are stored in the texture.
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('OES_texture_float');
if (!ext) {
  alert('need OES_texture_float');
}
const COMMON_STUFF = `
#define TEXTURE_WIDTH 5.0
#define MATRIX_ROW_0_OFFSET ((0. + 0.5) / TEXTURE_WIDTH)
#define MATRIX_ROW_1_OFFSET ((1. + 0.5) / TEXTURE_WIDTH)
#define MATRIX_ROW_2_OFFSET ((2. + 0.5) / TEXTURE_WIDTH)
#define MATRIX_ROW_3_OFFSET ((3. + 0.5) / TEXTURE_WIDTH)
#define COLOR_OFFSET ((4. + 0.5) / TEXTURE_WIDTH)
`;
const vs = `
attribute vec4 position;
attribute vec3 normal;
attribute float modelId;

uniform float textureHeight;
uniform sampler2D perModelDataTexture;
uniform mat4 projection;
uniform mat4 view;

varying vec3 v_normal;
varying float v_modelId;

${COMMON_STUFF}

void main() {
  v_modelId = modelId;  // pass to fragment shader
  float modelOffset = (modelId + 0.5) / textureHeight;

  // note: in WebGL2 better to use texelFetch
  mat4 model = mat4(
    texture2D(perModelDataTexture, vec2(MATRIX_ROW_0_OFFSET, modelOffset)),
    texture2D(perModelDataTexture, vec2(MATRIX_ROW_1_OFFSET, modelOffset)),
    texture2D(perModelDataTexture, vec2(MATRIX_ROW_2_OFFSET, modelOffset)),
    texture2D(perModelDataTexture, vec2(MATRIX_ROW_3_OFFSET, modelOffset)));

  gl_Position = projection * view * model * position;

  v_normal = mat3(view) * mat3(model) * normal;
}
`;
const fs = `
precision highp float;

varying vec3 v_normal;
varying float v_modelId;

uniform float textureHeight;
uniform sampler2D perModelDataTexture;
uniform vec3 lightDirection;

${COMMON_STUFF}

void main() {
  float modelOffset = (v_modelId + 0.5) / textureHeight;
  vec4 color = texture2D(perModelDataTexture, vec2(COLOR_OFFSET, modelOffset));

  float l = dot(lightDirection, normalize(v_normal)) * .5 + .5;

  gl_FragColor = vec4(color.rgb * l, color.a);
}
`;
// compile shader, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// make some vertex data
const modelVerts = [
  twgl.primitives.createSphereVertices(1, 6, 4),
  twgl.primitives.createCubeVertices(1, 1, 1),
  twgl.primitives.createCylinderVertices(1, 1, 10, 1),
  twgl.primitives.createTorusVertices(1, .2, 16, 8),
];
// merge all the vertices into one
const arrays = twgl.primitives.concatVertices(modelVerts);
// fill an array so each vertex of each model has a modelId
const modelIds = new Uint16Array(arrays.position.length / 3);
let offset = 0;
modelVerts.forEach((verts, modelId) => {
  const end = offset + verts.position.length / 3;
  while (offset < end) {
    modelIds[offset++] = modelId;
  }
});
arrays.modelId = { numComponents: 1, data: modelIds };
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const numModels = modelVerts.length;
const tex = gl.createTexture();
const textureWidth = 5; // 4 pixels for the 4x4 matrix + 1 pixel for the color
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureWidth, numModels, 0, gl.RGBA, gl.FLOAT, null);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// this data is for the texture, one row per model
// the first 4 pixels are the model matrix, the 5th pixel is the color
const perModelData = new Float32Array(textureWidth * numModels * 4);
const stride = textureWidth * 4;
const modelOffset = 0;
const colorOffset = 16;
// set the colors at init time
for (let modelId = 0; modelId < numModels; ++modelId) {
  perModelData.set([r(), r(), r(), 1], modelId * stride + colorOffset);
}

function r() {
  return Math.random();
}
function render(time) {
  time *= 0.001;  // seconds

  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  gl.enable(gl.DEPTH_TEST);
  gl.enable(gl.CULL_FACE);

  const fov = Math.PI * 0.25;
  const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const near = 0.1;
  const far = 20;
  const projection = m4.perspective(fov, aspect, near, far);
  const eye = [0, 0, 10];
  const target = [0, 0, 0];
  const up = [0, 1, 0];
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);

  // set the matrix for each model in the texture data
  const mat = m4.identity();
  for (let modelId = 0; modelId < numModels; ++modelId) {
    const t = time * (modelId + 1) * 0.3;
    m4.identity(mat);
    m4.rotateX(mat, t, mat);
    m4.rotateY(mat, t, mat);
    m4.translate(mat, [0, 0, Math.sin(t * 1.1) * 4], mat);
    m4.rotateZ(mat, t, mat);
    perModelData.set(mat, modelId * stride + modelOffset);
  }

  // upload the texture data
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, textureWidth, numModels,
                   gl.RGBA, gl.FLOAT, perModelData);

  gl.useProgram(programInfo.program);

  // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

  // calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
  twgl.setUniforms(programInfo, {
    lightDirection: v3.normalize([1, 2, 3]),
    perModelDataTexture: tex,
    textureHeight: numModels,
    projection,
    view,
  });

  // calls gl.drawArrays or gl.drawElements
  twgl.drawBufferInfo(gl, bufferInfo);

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Here's 2000 models in one draw call
https://jsfiddle.net/greggman/g2tcadho/
How can I plot an array to an image view as a graph?
I've been testing this in a Playground and it works, but how can I plot this in an image view in an actual project?
let sineArraySize = 64
let frequency1 = 4.0
let phase1 = 0.0
let amplitude1 = 2.0
let sineWave = (0..<sineArraySize).map {
    amplitude1 * sin(2.0 * M_PI / Double(sineArraySize) * Double($0) * frequency1 + phase1)
}

func plotArrayInPlayground<T>(arrayToPlot: Array<T>, title: String) {
    for currentValue in arrayToPlot {
        XCPCaptureValue(title, currentValue)
    }
}

plotArrayInPlayground(sineWave, "Sine wave 1")
One way you could do this:
// this function creates a plot of an array of doubles, scaled to the provided width, with the x-axis at half height
func plotArray(arr: [Double], width: Double, height: Double) -> NSImage {
    if arr.isEmpty { return NSImage() }
    let xAxisHeight = height / 2
    let increment = width / Double(arr.count)
    let image = NSImage(size: NSSize(width: width, height: height))
    image.lockFocus()
    // set background color
    NSColor.whiteColor().set()
    NSRectFill(NSRect(x: 0, y: 0, width: width, height: height))
    let path = NSBezierPath()
    // line width of plot
    path.lineWidth = 5
    path.moveToPoint(NSPoint(x: 0, y: arr[0] * increment + xAxisHeight))
    var i = increment
    // iterate over the function argument arr
    for value in dropFirst(arr) {
        path.lineToPoint(NSPoint(x: i, y: value * increment + xAxisHeight))
        i += increment
    }
    // set plot color
    NSColor.blueColor().set()
    path.stroke()
    image.unlockFocus()
    return image
}
var imageView = NSImageView()
imageView.image = plotArray(sineWave, 500, 200)
// have fun
I have a plane/board with a grid that is about 1100 x 1100. I have panning, zooming, and rotating working, except that the board moves back to the center of the screen after it has been panned. If I don't pan the board at all, everything works; after I pan it, the board snaps back to the center of the screen when I try to rotate. I cannot figure out how to change the pivot of the camera so that it rotates around the center of the board. It seems like it rotates around the center of the scene instead.
var radius = 1500, theta = 45 * 0.5, onMouseDownTheta = 45 * 0.5;
var fov = 45;
var mouse2D = new THREE.Vector3(0, 10000, 0.5);
cameraX = radius * Math.sin(THREE.Math.degToRad(theta));
cameraY = 1000;
cameraZ = radius * Math.cos(THREE.Math.degToRad(theta));
camera = new THREE.PerspectiveCamera(fov, window.innerWidth / window.innerHeight, 1, 10000);
camera.position.set(cameraX, cameraY, cameraZ);
scene = new THREE.Scene();
camera.lookAt(scene.position);
render();
ThreeBoard.prototype.render = function() {
    mouse2D.x = (event.clientX / window.innerWidth) * 2 - 1;
    mouse2D.y = -(event.clientY / window.innerHeight) * 2 + 1;

    // rotate
    if (isMouseDown && isShiftPressed && !isCtrlPressed) {
        theta = ((event.pageX - mouse2D.x) * 0.5) + onMouseDownTheta;
        cameraX = radius * Math.sin(THREE.Math.degToRad(theta));
        cameraZ = radius * Math.cos(THREE.Math.degToRad(theta));
        camera.position.set(cameraX, cameraY, cameraZ);
        camera.lookAt(scene.position);
    }

    // pan
    if (isMouseDown && isShiftPressed && isCtrlPressed) {
        theta = ((event.pageX - mouse2D.x) * 0.5) + onMouseDownTheta;
        cameraX += 10 * mouse2D.x;
        // cameraY += 10;
        cameraZ -= 10 * mouse2D.y;
        camera.position.set(cameraX, cameraY, cameraZ);
        // camera.lookAt(scene.position);
    }

    renderer.render(scene, camera);
};
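For what it's worth, here is a minimal sketch of what I'm imagining the fix might look like (the target vector is hypothetical, not something in my code above): pan the pivot along with the camera, and orbit around it instead of scene.position:

var target = new THREE.Vector3(0, 0, 0); // hypothetical orbit pivot, shifted by panning

// pan: move the camera and the pivot together
target.x += 10 * mouse2D.x;
target.z -= 10 * mouse2D.y;
cameraX += 10 * mouse2D.x;
cameraZ -= 10 * mouse2D.y;

// rotate: orbit around the pivot instead of the scene origin
cameraX = target.x + radius * Math.sin(THREE.Math.degToRad(theta));
cameraZ = target.z + radius * Math.cos(THREE.Math.degToRad(theta));
camera.position.set(cameraX, cameraY, cameraZ);
camera.lookAt(target);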