Not using vertex attributes based on push constants - Vulkan

I have a GLSL vertex shader where one of the attributes is only used if a push constant is set:
layout(location = 0) in ivec2 a_pos;
layout(location = 1) in ivec2 a_nrm;
layout(location = 2) in float a_Height;
void main()
{
<...>
float Offset = ( u_enabling_flag > 0.0 ) ? a_Height : 0.0;
< some calculation involving Offset >
I get the following validation error:
vkDebug: Validation: 0: Validation Error: [ UNASSIGNED-CoreValidation-Shader-InputNotProduced ] Object 0: handle = 0x3a000000003a, type = VK_OBJECT_TYPE_SHADER_MODULE; | MessageID = 0x23e43bb7 | Vertex shader consumes input at location 2 but not provided
The graphical output looks fine but is there a possibility to get rid of the error?
Regards.

The graphical output looks fine but is there a possibility to get rid of the error?
"Vertex shader consumes input at location 2 but not provided"
Either remove the input at location 2 from the shader, or provide a vertex attribute description (and a backing vertex buffer binding) for that location when you create the pipeline.
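If you want to keep the attribute in the shader, here is a minimal sketch of the second option, assuming a_Height lives in its own binding; the binding index, offsets and the buffer you bind are placeholders you would adapt to your setup:
VkVertexInputBindingDescription heightBinding{};
heightBinding.binding   = 2;                          // placeholder binding index
heightBinding.stride    = sizeof(float);
heightBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

VkVertexInputAttributeDescription heightAttribute{};
heightAttribute.location = 2;                         // matches a_Height in the shader
heightAttribute.binding  = 2;
heightAttribute.format   = VK_FORMAT_R32_SFLOAT;
heightAttribute.offset   = 0;

// Add both to the arrays referenced by VkPipelineVertexInputStateCreateInfo,
// and bind a (possibly small dummy) buffer at that binding with
// vkCmdBindVertexBuffers before drawing.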


Vulkan Invalid attribAddress alignment for vertex attribute 0

I really do not understand what the validation error means. There is no alignment requirement for vertex shader attributes in VkPhysicalDeviceLimits.
Here is the full error:
[2022-02-07.20:42:38]: Validation Error: [ VUID-vkCmdDrawIndexed-None-02721 ] Object 0: handle = 0xfef35a00000000a0, type = VK_OBJECT_TYPE_BUFFER; Object 1: handle = 0xa56ac00000000d4, type = VK_OBJECT_TYPE_PIPELINE; | MessageID = 0x24afafc5 | vkCmdDrawIndexed: Invalid attribAddress alignment for vertex attribute 0, VK_FORMAT_R32G32B32_SFLOAT,from of VkPipeline 0xa56ac00000000d4[] and vertex VkBuffer 0xfef35a00000000a0[]. The Vulkan spec states: For a given vertex buffer binding, any attribute data fetched must be entirely contained within the corresponding vertex buffer binding, as described in Vertex Input Description (https://vulkan.lunarg.com/doc/view/1.2.198.1/windows/1.2-extensions/vkspec.html#VUID-vkCmdDrawIndexed-None-02721)
Here is my shader.
#version 450 core
layout (location = 0) in vec3 inPosition;
layout (location = 1) in vec3 inNormal;
layout (location = 2) in vec2 inTexCoord;
// Instancing Data
//layout (location = 3) in vec2 XYOffset;
layout (binding = 0) uniform View_Projection {
mat4 u_View;
mat4 u_Projection;
};
layout (binding = 1) uniform Model {
mat4 u_Model;
mat4 u_NormalModel; // transpose(inverse(u_Model))
};
layout (location = 0) out vec3 Normal;
layout (location = 1) out vec2 TexCoord;
void main() {
Normal = mat3(u_NormalModel) * inNormal;
TexCoord = inTexCoord;
//vec4 InstancePosition = vec4(inPosition.xy + XYOffset, inPosition.z, 1.0);
//gl_Position = u_Projection * u_View * u_Model * InstancePosition;
gl_Position = u_Projection * u_View * u_Model * vec4(inPosition, 1.0);
}
UPDATE 1:
struct VulkanPipelineVertexInput
{
VkPipelineVertexInputStateCreateInfo createInfo;
std::vector<VkVertexInputBindingDescription> binding_descriptions;
std::vector<VkVertexInputAttributeDescription> attribute_descriptions;
std::vector<VkVertexInputBindingDivisorDescriptionEXT> divisor_description;
VkPipelineVertexInputDivisorStateCreateInfoEXT divisorCreateInfo;
};
static VulkanPipelineVertexInput Vulkan_Internal_PipelineState_InitalizeVertexInput(IPipelineLayout layout)
{
VulkanPipelineVertexInput input_state;
PipelineVertexInputDescription &input_description = layout->m_vertex_input_description;
if(input_description.m_input_elements.size() > 0)
{
int lastBinding = -1;
for(const auto& element : input_description.m_input_elements)
{
VkVertexInputAttributeDescription attribute;
attribute.binding = element.m_binding_id;
attribute.format = element.m_vk_format;
attribute.location = element.m_location;
attribute.offset = element.m_offset;
input_state.attribute_descriptions.push_back(attribute);
if(lastBinding != element.m_binding_id)
{
lastBinding = element.m_binding_id;
VkVertexInputBindingDescription binding_description;
binding_description.binding = element.m_binding_id;
binding_description.inputRate = element.m_per_instance ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
binding_description.stride = element.m_stride;
if(element.m_per_instance)
{
VkVertexInputBindingDivisorDescriptionEXT divisor_description;
divisor_description.binding = element.m_binding_id;
divisor_description.divisor = element.m_divisor_rate;
input_state.divisor_description.push_back(divisor_description);
}
input_state.binding_descriptions.push_back(binding_description);
}
}
}
input_state.createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
input_state.createInfo.pNext = input_state.divisor_description.size() > 0 ? &input_state.divisorCreateInfo : nullptr;
input_state.createInfo.flags = 0;
input_state.createInfo.vertexBindingDescriptionCount = input_state.binding_descriptions.size();
input_state.createInfo.pVertexBindingDescriptions = input_state.binding_descriptions.data();
input_state.createInfo.vertexAttributeDescriptionCount = input_state.attribute_descriptions.size();
input_state.createInfo.pVertexAttributeDescriptions = input_state.attribute_descriptions.data();
return input_state;
}
UPDATE 2:
This code does render; however, some geometry is shifted.
Render Output
Vertex attribute addresses must be naturally aligned to the format's component type. For VK_FORMAT_R32G32B32_SFLOAT that means a 4-byte boundary.
Section 22.4.1 in the Vulkan 1.3 spec:
For each attribute, raw data is extracted starting at attribAddress and is converted from the
VkVertexInputAttributeDescription’s format to either floating-point, unsigned integer, or signed
integer based on the base type of the format; the base type of the format must match the base type
of the input variable in the shader. The input variable in the shader must be declared as a 64-bit
data type if and only if format is a 64-bit data type. If format is a packed format, attribAddress must
be a multiple of the size in bytes of the whole attribute data type as described in Packed Formats.
Otherwise, attribAddress must be a multiple of the size in bytes of the component type indicated by
format (see Formats).
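To make that rule concrete, the address being checked can be written roughly as follows. This is only an illustration of the quoted paragraph; the variable names are not Vulkan API names:
// Illustrative only - not real Vulkan API calls.
// For a binding bound with vkCmdBindVertexBuffers at byte offset bindOffset,
// and an attribute described with attribOffset within each vertex:
VkDeviceSize attribAddress = bufferBaseAddress        // start of the VkBuffer memory
                           + bindOffset               // pOffsets[] value passed at bind time
                           + vertexIndex * stride
                           + attribOffset;
// For VK_FORMAT_R32G32B32_SFLOAT the component type is a 32-bit float,
// so attribAddress must be a multiple of 4 for every vertexIndex.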
https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/3733
I figured it out!!!!!!!!! I thought this was a driver bug.
I opened the program in RenderDoc and it showed that the vertex buffer had an offset of 1.
The problem was that I was calling vkCmdBindVertexBuffers incorrectly. vkCmdBindVertexBuffers requires an array of VkDeviceSize offsets (pOffsets) even if you don't use them. I had made a typo, writing VkDeviceSize pOffsets[1] = {1}; instead of VkDeviceSize pOffsets[1] = {0};.
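With that offset of 1, every attribAddress in the formula above becomes odd and the 4-byte rule is violated. For reference, a minimal sketch of the corrected bind call (the command buffer and buffer handles are placeholders):
VkBuffer     vertexBuffers[1] = { vertexBuffer };  // placeholder handle
VkDeviceSize offsets[1]       = { 0 };             // 0 (or another 4-byte-aligned offset), not 1
vkCmdBindVertexBuffers(commandBuffer, /*firstBinding*/ 0u, /*bindingCount*/ 1u,
                       vertexBuffers, offsets);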

Skeletal animation bug with Assimp in DirectX 12

I am using Assimp to load an FBX model with animation (created in Blender) into my DirectX 12 game, but I'm experiencing a very frustrating bug with the animation rendered by the game application.
The test model is a simple 'flagpole' containing four bones like so:
Bone0 -> Bone1 -> Bone2 -> Bone3
The model renders correctly in its rest pose when the keyframe animation is bypassed.
The model also renders and animates properly when the animation rotates the model only by the root bone (Bone0).
However, when importing a model that rotates at the first joint (i.e. at Bone1), the vertices clustered around each joint seem 'stuck' in their original positions, while the vertices surrounding the 'bones' proper appear to follow through with the correct animation.
The result is a crappy zigzag of stretched geometry like so:
Instead the model should resemble an 'allen-key' shape at the end of its animation pose, as shown by the same model rendered in the AssimpViewer utility tool:
Since the model is rendering correctly in AssimpViewer, it's reasonable to assume there are no issues with the FBX file exported by Blender. I then checked and confirmed that the vertices 'stuck' around the joints did indeed have their vertex weights correctly assigned by the game loading code.
The C++ model loading and animation code is based on the popular OGLDev tutorial: https://ogldev.org/www/tutorial38/tutorial38.html
Now the infuriating thing is, since the AssimpViewer tool was correctly rendering the model animation, I also copied in the SceneAnimator and AnimEvaluator classes from that tool to generate the final bone transforms via that code branch as well... only to end up with exactly the same zigzag bug in the game!
I'm reasonably confident there aren't any issues with finding the bone hierarchy structure at initialization, so here are the key functions that traverse the hierarchy and interpolate key frames each frame.
VOID Mesh::ReadNodeHeirarchy(FLOAT animationTime, CONST aiNode* pNode, CONST aiAnimation* pAnim, CONST aiMatrix4x4 parentTransform)
{
std::string nodeName(pNode->mName.data);
// nodeTransform is a relative transform to parent node space
aiMatrix4x4 nodeTransform = pNode->mTransformation;
CONST aiNodeAnim* pNodeAnim = FindNodeAnim(pAnim, nodeName);
if (pNodeAnim)
{
// Interpolate scaling and generate scaling transformation matrix
aiVector3D scaling(1.f, 1.f, 1.f);
CalcInterpolatedScaling(scaling, animationTime, pNodeAnim);
// Interpolate rotation and generate rotation transformation matrix
aiQuaternion rotationQ (1.f, 0.f, 0.f, 0.f);
CalcInterpolatedRotation(rotationQ, animationTime, pNodeAnim);
// Interpolate translation and generate translation transformation matrix
aiVector3D translat(0.f, 0.f, 0.f);
CalcInterpolatedPosition(translat, animationTime, pNodeAnim);
// build the SRT transform matrix
nodeTransform = aiMatrix4x4(rotationQ.GetMatrix());
nodeTransform.a1 *= scaling.x; nodeTransform.b1 *= scaling.x; nodeTransform.c1 *= scaling.x;
nodeTransform.a2 *= scaling.y; nodeTransform.b2 *= scaling.y; nodeTransform.c2 *= scaling.y;
nodeTransform.a3 *= scaling.z; nodeTransform.b3 *= scaling.z; nodeTransform.c3 *= scaling.z;
nodeTransform.a4 = translat.x; nodeTransform.b4 = translat.y; nodeTransform.c4 = translat.z;
}
aiMatrix4x4 globalTransform = parentTransform * nodeTransform;
if (m_boneMapping.find(nodeName) != m_boneMapping.end())
{
UINT boneIndex = m_boneMapping[nodeName];
// the global inverse transform returns us to mesh space!!!
m_boneInfo[boneIndex].FinalTransform = m_globalInverseTransform * globalTransform * m_boneInfo[boneIndex].BoneOffset;
//m_boneInfo[boneIndex].FinalTransform = m_boneInfo[boneIndex].BoneOffset * globalTransform * m_globalInverseTransform;
m_shaderTransforms[boneIndex] = aiMatrixToSimpleMatrix(m_boneInfo[boneIndex].FinalTransform);
}
for (UINT i = 0u; i < pNode->mNumChildren; i++)
{
ReadNodeHeirarchy(animationTime, pNode->mChildren[i], pAnim, globalTransform);
}
}
VOID Mesh::CalcInterpolatedRotation(aiQuaternion& out, FLOAT animationTime, CONST aiNodeAnim* pNodeAnim)
{
UINT rotationKeys = pNodeAnim->mNumRotationKeys;
// we need at least two values to interpolate...
if (rotationKeys == 1u)
{
CONST aiQuaternion& key = pNodeAnim->mRotationKeys[0u].mValue;
out = key;
return;
}
UINT rotationIndex = FindRotation(animationTime, pNodeAnim);
UINT nextRotationIndex = (rotationIndex + 1u) % rotationKeys;
assert(nextRotationIndex < rotationKeys);
CONST aiQuatKey& key = pNodeAnim->mRotationKeys[rotationIndex];
CONST aiQuatKey& nextKey = pNodeAnim->mRotationKeys[nextRotationIndex];
FLOAT deltaTime = FLOAT(nextKey.mTime) - FLOAT(key.mTime);
FLOAT factor = (animationTime - FLOAT(key.mTime)) / deltaTime;
assert(factor >= 0.f && factor <= 1.f);
aiQuaternion::Interpolate(out, key.mValue, nextKey.mValue, factor);
}
I've just included the rotation interpolation here, since the scaling and translation functions are identical. For those unaware, Assimp's aiMatrix4x4 type follows a column-vector math convention, so I haven't messed with the original matrix multiplication order.
About the only deviation between my code and the two Assimp-based code branches I've adopted is the requirement to convert the final transforms from aiMatrix4x4 types into a DirectXTK SimpleMath Matrix (really an XMMATRIX) with this conversion function:
Matrix Mesh::aiMatrixToSimpleMatrix(CONST aiMatrix4x4 m)
{
return Matrix
(m.a1, m.a2, m.a3, m.a4,
m.b1, m.b2, m.b3, m.b4,
m.c1, m.c2, m.c3, m.c4,
m.d1, m.d2, m.d3, m.d4);
}
Because of the column-vector orientation of Assimp's aiMatrix4x4 matrices, the final bone transforms are not transposed for HLSL consumption. The array of final bone transforms is passed to the skinning vertex shader's constant buffer as follows.
commandList->SetPipelineState(m_psoForwardSkinned.Get()); // set PSO
// Update vertex shader with current bone transforms
CONST std::vector<Matrix> transforms = m_assimpModel.GetShaderTransforms();
VSBonePassConstants vsBoneConstants{};
for (UINT i = 0; i < m_assimpModel.GetNumBones(); i++)
{
// We do not transpose bone matrices for HLSL because the original
// Assimp matrices are column-vector matrices.
vsBoneConstants.boneTransforms[i] = transforms[i];
//vsBoneConstants.boneTransforms[i] = transforms[i].Transpose();
//vsBoneConstants.boneTransforms[i] = Matrix::Identity;
}
GraphicsResource vsBoneCB = m_graphicsMemory->AllocateConstant(vsBoneConstants);
vsPerObjects.gWorld = m_assimp_world.Transpose(); // vertex shader per object constant
vsPerObjectCB = m_graphicsMemory->AllocateConstant(vsPerObjects);
commandList->SetGraphicsRootConstantBufferView(RootParameterIndex::VSBoneConstantBuffer, vsBoneCB.GpuAddress());
commandList->SetGraphicsRootConstantBufferView(RootParameterIndex::VSPerObjConstBuffer, vsPerObjectCB.GpuAddress());
//commandList->SetGraphicsRootDescriptorTable(RootParameterIndex::ObjectSRV, m_shaderTextureHeap->GetGpuHandle(ShaderTexDescriptors::SuzanneDiffuse));
commandList->SetGraphicsRootDescriptorTable(RootParameterIndex::ObjectSRV, m_shaderTextureHeap->GetGpuHandle(ShaderTexDescriptors::DefaultDiffuse));
for (UINT i = 0; i < m_assimpModel.GetMeshSize(); i++)
{
commandList->IASetVertexBuffers(0u, 1u, &m_assimpModel.meshEntries[i].GetVertexBufferView());
commandList->IASetIndexBuffer(&m_assimpModel.meshEntries[i].GetIndexBufferView());
commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
commandList->DrawIndexedInstanced(m_assimpModel.meshEntries[i].GetIndexCount(), 1u, 0u, 0u, 0u);
}
Please note I am using the Graphics Resource memory management helper object found in the DirectXTK12 library in the code above. Finally, here's the skinning vertex shader I'm using.
// Luna (2016) lighting model adapted from Moller
#define MAX_BONES 4
// vertex shader constant data that varies per object
cbuffer cbVSPerObject : register(b3)
{
float4x4 gWorld;
//float4x4 gTexTransform;
}
// vertex shader constant data that varies per frame
cbuffer cbVSPerFrame : register(b5)
{
float4x4 gViewProj;
float4x4 gShadowTransform;
}
// bone matrix constant data that varies per object
cbuffer cbVSBonesPerObject : register(b9)
{
float4x4 gBoneTransforms[MAX_BONES];
}
struct VertexIn
{
float3 posL : SV_POSITION;
float3 normalL : NORMAL;
float2 texCoord : TEXCOORD0;
float3 tangentU : TANGENT;
float4 boneWeights : BONEWEIGHT;
uint4 boneIndices : BONEINDEX;
};
struct VertexOut
{
float4 posH : SV_POSITION;
//float3 posW : POSITION;
float4 shadowPosH : POSITION0;
float3 posW : POSITION1;
float3 normalW : NORMAL;
float2 texCoord : TEXCOORD0;
float3 tangentW : TANGENT;
};
VertexOut VS_main(VertexIn vin)
{
VertexOut vout = (VertexOut)0.f;
// Perform vertex skinning.
// Ignore BoneWeights.w and instead calculate the last weight value
// to ensure all bone weights sum to unity.
float4 weights = vin.boneWeights;
//weights.w = 1.f - dot(weights.xyz, float3(1.f, 1.f, 1.f));
//float4 weights = { 0.f, 0.f, 0.f, 0.f };
//weights.x = vin.boneWeights.x;
//weights.y = vin.boneWeights.y;
//weights.z = vin.boneWeights.z;
weights.w = 1.f - (weights.x + weights.y + weights.z);
float4 localPos = float4(vin.posL, 1.f);
float3 localNrm = vin.normalL;
float3 localTan = vin.tangentU;
float3 objPos = mul(localPos, (float4x3)gBoneTransforms[vin.boneIndices.x]).xyz * weights.x;
objPos += mul(localPos, (float4x3)gBoneTransforms[vin.boneIndices.y]).xyz * weights.y;
objPos += mul(localPos, (float4x3)gBoneTransforms[vin.boneIndices.z]).xyz * weights.z;
objPos += mul(localPos, (float4x3)gBoneTransforms[vin.boneIndices.w]).xyz * weights.w;
float3 objNrm = mul(localNrm, (float3x3)gBoneTransforms[vin.boneIndices.x]) * weights.x;
objNrm += mul(localNrm, (float3x3)gBoneTransforms[vin.boneIndices.y]) * weights.y;
objNrm += mul(localNrm, (float3x3)gBoneTransforms[vin.boneIndices.z]) * weights.z;
objNrm += mul(localNrm, (float3x3)gBoneTransforms[vin.boneIndices.w]) * weights.w;
float3 objTan = mul(localTan, (float3x3)gBoneTransforms[vin.boneIndices.x]) * weights.x;
objTan += mul(localTan, (float3x3)gBoneTransforms[vin.boneIndices.y]) * weights.y;
objTan += mul(localTan, (float3x3)gBoneTransforms[vin.boneIndices.z]) * weights.z;
objTan += mul(localTan, (float3x3)gBoneTransforms[vin.boneIndices.w]) * weights.w;
vin.posL = objPos;
vin.normalL = objNrm;
vin.tangentU.xyz = objTan;
//vin.posL = posL;
//vin.normalL = normalL;
//vin.tangentU.xyz = tangentL;
// End vertex skinning
// transform to world space
float4 posW = mul(float4(vin.posL, 1.f), gWorld);
vout.posW = posW.xyz;
// assumes uniform scaling; otherwise the inverse-transpose of the world matrix is needed
vout.normalW = mul(vin.normalL, (float3x3)gWorld);
vout.tangentW = mul(vin.tangentU, (float3x3)gWorld);
// transform to homogenous clip space
vout.posH = mul(posW, gViewProj);
// pass texcoords to pixel shader
vout.texCoord = vin.texCoord;
//float4 texC = mul(float4(vin.TexC, 0.0f, 1.0f), gTexTransform);
//vout.TexC = mul(texC, gMatTransform).xy;
// generate projective tex-coords to project shadow map onto scene
vout.shadowPosH = mul(posW, gShadowTransform);
return vout;
}
Some last tests I tried before posting:
I tested the code with a Collada (DAE) model exported from Blender, only to observe the same distorted zigzagging in the Win32 desktop application.
I also confirmed the aiScene object for the loaded model returns an identity matrix for the global root transform (also verified in AssimpViewer).
I have stared at this code for about a week and am going out of my mind! Really hoping someone can spot what I have missed. If you need more code or info, please ask!
This seems to be a bug in the published code of the tutorials / documentation. It would be great if you could open an issue report on the Assimp project page on GitHub.
It's taken almost another two weeks of pain, but I finally found the bug. It was in my own code, and it was self-inflicted. Before I show the solution, I should explain the further troubleshooting I did to get there.
After losing faith with Assimp (even though the AssimpViewer tool was animating my model correctly), I turned to the FBX SDK. The FBX ViewScene command line utility tool that's available as part of the SDK was also showing and animating my model properly, so I had hope...
So after a few days reviewing the FBX SDK tutorials, and taking another week to write an FBX importer for my Windows desktop game, I loaded my model and... saw exactly the same zig-zag animation anomaly as the version loaded by Assimp!
This frustrating outcome meant I could at least eliminate Assimp and the FBX SDK as the source of the problem, and focus again on the vertex shader. The shader I'm using for vertex skinning was adopted from the 'Character Animation' chapter of Frank Luna's text. It was identical in every way, which led me to recheck the C++ vertex structure declared on the application side...
Here's the C++ vertex declaration for skinned vertices:
struct Vertex
{
// added constructors
Vertex() = default;
Vertex(FLOAT x, FLOAT y, FLOAT z,
FLOAT nx, FLOAT ny, FLOAT nz,
FLOAT u, FLOAT v,
FLOAT tx, FLOAT ty, FLOAT tz) :
Pos(x, y, z),
Normal(nx, ny, nz),
TexC(u, v),
Tangent(tx, ty, tz) {}
Vertex(DirectX::SimpleMath::Vector3 pos,
DirectX::SimpleMath::Vector3 normal,
DirectX::SimpleMath::Vector2 texC,
DirectX::SimpleMath::Vector3 tangent) :
Pos(pos), Normal(normal), TexC(texC), Tangent(tangent) {}
DirectX::SimpleMath::Vector3 Pos;
DirectX::SimpleMath::Vector3 Normal;
DirectX::SimpleMath::Vector2 TexC;
DirectX::SimpleMath::Vector3 Tangent;
FLOAT BoneWeights[4];
BYTE BoneIndices[4];
//UINT BoneIndices[4]; <--- YOU HAVE CAUSED ME A MONTH OF PAIN
};
Quite early on, being confused by Luna's use of BYTE to store the array of bone indices, I changed this structure element to UINT, figuring this still matched the input declaration shown here:
static CONST D3D12_INPUT_ELEMENT_DESC inputElementDescSkinned[] =
{
{ "SV_POSITION", 0u, DXGI_FORMAT_R32G32B32_FLOAT, 0u, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0u },
{ "NORMAL", 0u, DXGI_FORMAT_R32G32B32_FLOAT, 0u, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0u },
{ "TEXCOORD", 0u, DXGI_FORMAT_R32G32_FLOAT, 0u, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0u },
{ "TANGENT", 0u, DXGI_FORMAT_R32G32B32_FLOAT, 0u, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0u },
//{ "BINORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
{ "BONEWEIGHT", 0u, DXGI_FORMAT_R32G32B32A32_FLOAT, 0u, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0u },
{ "BONEINDEX", 0u, DXGI_FORMAT_R8G8B8A8_UINT, 0u, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0u },
};
Here was the bug. By declaring UINT in the vertex structure for bone indices, four bytes were being assigned to store each bone index. But in the vertex input declaration, the DXGI_FORMAT_R8G8B8A8_UINT format specified for the "BONEINDEX" was assigning one byte per index. I suspect this data type and format size mismatch was resulting in only one valid bone index being able to fit in the BONEINDEX element, and so only one index value was passed to the vertex shader each frame, instead of the whole array of four indices for correct bone transform lookups.
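In other words, the C++ element type and the DXGI format must describe the same number of bytes. Either of these pairings would be self-consistent (a sketch of the relevant fragments only, reusing the same semantic name as above):
// Option A: one byte per bone index on the CPU side (Luna's original layout).
struct VertexA { /* ...other members... */ FLOAT BoneWeights[4]; BYTE BoneIndices[4]; };
// matching input element:
// { "BONEINDEX", 0u, DXGI_FORMAT_R8G8B8A8_UINT, ... }

// Option B: four bytes per bone index on the CPU side.
struct VertexB { /* ...other members... */ FLOAT BoneWeights[4]; UINT BoneIndices[4]; };
// matching input element:
// { "BONEINDEX", 0u, DXGI_FORMAT_R32G32B32A32_UINT, ... }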
So now I've learned... the hard way... why Luna had declared an array of BYTE for bone indices in the original C++ vertex structure.
I hope this experience will be of value to someone else, and always be careful changing code from your original learning sources.

Custom Delaunay Refinement with CGAL Delaunay3D

I want to perform a custom refinement strategy on a tetrahedral mesh. My input is a point cloud, which I have tetrahedralized using the Delaunay 3D routine available in CGAL. The points have scalar values associated with them. Now I want to refine the tetrahedral mesh with the following strategy:
1. Get the maximum value among the vertices of each tetrahedron.
2. Get the value at the point that is going to be inserted (maybe the barycentre, weighted centroid, or circumcenter).
3. If the difference is large enough, add this point.
Any idea how to do this effectively? Note: I do not require 0-1 dimensional feature preservation.
I have already tried the above strategy. Let me show what I have done so far.
// Assume T is Delaunay_3D triangulation CGAL mesh and I have an oracle f that tells me what is the value at the point that is going to be inserted if conditions are met.
bool updated = true;
int it = 0;
while (updated)
{
updated = false;
std::vector<std::pair<Point, unsigned> > point_to_be_inserted;
for (auto cit = T.finite_cells_begin(); cit != T.finite_cells_end(); cit++)
{
Cell_handle c = cit;
Point v = /* vertex of the cell with the maximum scalar value (max_val) */;
Point q = /* point that is going to be inserted (e.g. barycentre or circumcenter) */;
double val_at_new_pt = oracle(q, &pts, var);
double ratio = std::abs(max_val - val_at_new_pt) / max_val;
if (ratio > threshold) {
point_to_be_inserted.emplace_back(std::make_pair(q, new_pt_ind));
updated = true;
}
}
if (updated)
{
std::cout << "Total pts inserted in it: " << it << " " << point_to_be_inserted.size() << std::endl;
T.insert(point_to_be_inserted.begin(), point_to_be_inserted.end());
}
++it;
}
The problem is that it is quite slow (each iteration walks through all the cells). I have not found an effective strategy to do the refinement locally. I tried using a queue, but the cell handles get invalidated after I perform one iteration of refinement. I also cannot keep a map recording whether a tetrahedron has already been refined, because new cell handles are created every time new points are inserted. Any help will be appreciated. Thanks in advance.
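Not a full answer, but one way to avoid re-scanning every cell is to insert points one at a time and only re-examine the cells incident to the newly created vertex, since those are exactly the cells that changed. A minimal sketch, assuming the triangulation is full-dimensional (dimension() == 3) and that needs_refinement is a placeholder wrapping the ratio test and oracle from the question (the scalar values would typically be stored in a vertex base with info):
#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Delaunay_triangulation_3.h>
#include <iterator>
#include <queue>
#include <vector>

typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef CGAL::Delaunay_triangulation_3<K>                   Triangulation;
typedef Triangulation::Cell_handle                          Cell_handle;
typedef Triangulation::Vertex_handle                        Vertex_handle;
typedef Triangulation::Point                                Point;

// Placeholder for the ratio test in the question: returns true if cell c should
// be refined and writes the point to insert into q.
bool needs_refinement(const Triangulation& T, Cell_handle c, Point& q);

void refine_locally(Triangulation& T)
{
    // One full pass to seed the work queue with candidate points.
    std::queue<Point> work;
    for (auto cit = T.finite_cells_begin(); cit != T.finite_cells_end(); ++cit) {
        Point q;
        if (needs_refinement(T, cit, q)) work.push(q);
    }
    // Insert one point at a time; the cells incident to the new vertex are
    // exactly the freshly created ones, so only those need re-examination.
    while (!work.empty()) {
        Point q = work.front(); work.pop();
        Vertex_handle v = T.insert(q);
        std::vector<Cell_handle> cells;
        T.incident_cells(v, std::back_inserter(cells));
        for (Cell_handle c : cells) {
            Point nq;
            if (!T.is_infinite(c) && needs_refinement(T, c, nq)) work.push(nq);
        }
    }
}
This keeps a queue of points rather than cell handles, which sidesteps the handle-invalidation issue you ran into.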

How can I read individual pixels from a CVPixelBuffer

AVDepthData gives me a CVPixelBuffer of depth data. But I can't find a way to easily access the depth information in this CVPixelBuffer. Is there a simple recipe in Objective-C to do so?
You have to use the CVPixelBuffer APIs to get the right format to access the data via unsafe pointer manipulations. Here is the basic way:
CVPixelBufferRef pixelBuffer = _lastDepthData.depthDataMap;
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
size_t cols = CVPixelBufferGetWidth(pixelBuffer);
size_t rows = CVPixelBufferGetHeight(pixelBuffer);
Float32 *baseAddress = (Float32 *)CVPixelBufferGetBaseAddress( pixelBuffer );
// This next step is not necessary, but I include it here for illustration,
// you can get the type of pixel format, and it is associated with a kCVPixelFormatType
// this can tell you what type of data it is e.g. in this case Float32
OSType type = CVPixelBufferGetPixelFormatType( pixelBuffer);
if (type != kCVPixelFormatType_DepthFloat32) {
NSLog(@"Wrong type");
}
// Arbitrary values of x and y to sample
int x = 20; // must be lower than cols
int y = 30; // must be lower than rows
// Get the pixel. You could iterate here of course to get multiple pixels!
int baseAddressIndex = y * (int)cols + x;
const Float32 pixel = baseAddress[baseAddressIndex];
CVPixelBufferUnlockBaseAddress( pixelBuffer, 0 );
Note that the first thing you need to determine is what type of data is in the CVPixelBuffer - if you don't know this, you can use CVPixelBufferGetPixelFormatType() to find out. In this case I am getting depth data as Float32; if you were using another type, e.g. Float16, you would need to replace all occurrences of Float32 with that type.
Note that it's important to lock and unlock the base address using CVPixelBufferLockBaseAddress and CVPixelBufferUnlockBaseAddress.
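One additional caveat the sample glosses over: rows in a CVPixelBuffer can be padded, so indexing with y * cols + x is only safe when the bytes-per-row happen to equal cols * sizeof(Float32). A safer sketch using CVPixelBufferGetBytesPerRow (same placeholder x and y as above):
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
uint8_t *base = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer);
// Step to the start of row y in bytes, then index within the row as Float32.
const Float32 *row = (const Float32 *)(base + (size_t)y * bytesPerRow);
const Float32 pixel = row[x];
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);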

Compile error when trying to access StructuredBuffer

I want to access a StructuredBuffer<int> in a compute shader, but I get the error:
Shader error in 'Particle.compute': array, matrix, vector, or indexable object type expected in index expression at Particle.compute(28) (on d3d11)
The code:
#pragma kernel CSMain
#include "Assets/Uplus/ZCommon/Resources/ImageProcessing/UplusDirectCompute.cginc"
struct Particle
{
float3 Position;
float Mass;
};
Texture2D<float2> _terTx;
ConsumeStructuredBuffer<Particle> currentBuffer;
AppendStructuredBuffer<Particle> nextBuffer;
StructuredBuffer<int> particleCount;
float3 _terPos;
float _terSize, _terPhysicalScale, _resolution;
SamplerState _LinearClamp;
SamplerState _LinearRepeat;
#define _gpSize 512
[numthreads(_gpSize, 1, 1)]
void CSMain(uint3 dispatchID : SV_DispatchThreadID)
{
int flatID = dispatchID.x;
int particleCount = particleCount[0];
if (flatID >= particleCount) return;
Particle particle = currentBuffer.Consume();
//Commented the rest of code
nextBuffer.Append(particle);
}
The error points to the line int particleCount = particleCount[0];. Why is that?
The whole idea behind the shader is that we have two buffers. We fill one with data (each element is a Particle) from the CPU, then in the shader we consume the data from that buffer, process it, and append it to the other buffer. Then we swap the buffers and do another iteration. The particleCount buffer holds the current number of Particles, and the if clause prevents consuming more Particles than are available.
This is an old question so I assume you solved it, but here is the answer anyway:
You are declaring a local int named particleCount, which shadows the buffer of the same name.
Either change the name to int currentParticleCount = particleCount[0]; or just don't use a temporary variable:
if (flatID >= particleCount[0]) return;