How to convert an xarray to std::vector? - stdvector

The docs make it quite clear on how to adapt a std::vector to a tensor object.
https://xtensor.readthedocs.io/en/latest/adaptor.html
std::vector<double> v = {1., 2., 3., 4., 5., 6. };
std::vector<std::size_t> shape = { 2, 3 };
auto a1 = xt::adapt(v, shape);
But how can you do it for the other way round?
xt::xarray<double> a2 = { { 1., 2., 3.} };
std::vector<double> a2vector = ?;

You can construct a std::vector from iterators. For your example:
std::vector<double> w(a1.begin(), a1.end());
The complete example then becomes:
#include <vector>
#include <xtensor/xadapt.hpp>
#include <xtensor/xio.hpp>
int main()
{
    // Source data and the 2x3 shape we want to view it through.
    std::vector<double> data = {1., 2., 3., 4., 5., 6.};
    std::vector<std::size_t> dims = {2, 3};
    // Adapt the vector into a tensor view (no copy is made).
    auto tensor = xt::adapt(data, dims);
    // Copy back out: the iterator-pair constructor flattens in row-major order.
    std::vector<double> flattened(tensor.begin(), tensor.end());
    return 0;
}
References:
std::vector.
Constructors of std::vector (number (5) is the one relevant here).
xtensor documentation section 1.7.1 Adapting std::vector

Unfortunately Tom de Geus' answer does not maintain dimensionality and hence transforms the xarray of shape {2, 3} into a vector of size 6.
I stumbled over this question when attempting to construct a nested vector in order to plot an xarray with matplotlibcpp. For me it turned out that Eigen::Matrix is a far more suitable class for this purpose. For the 2-dimensional case, one can comfortably convert the Eigen::Matrix to a nested std::vector. For higher dimensions, it's worth having a look here.
Code
transforms xt::xarray to Eigen::MatrixXf to nested std::vector
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp"
#include <Eigen/Dense>
//https://stackoverflow.com/questions/8443102/convert-eigen-matrix-to-c-array
// Copies a rank-2 xt::xarray<float> into an Eigen::MatrixXf.
// Fix: xtensor stores its buffer row-major, but Eigen::MatrixXf (and a
// plain Eigen::Map of it) is column-major. Mapping the raw pointer with
// the default layout scrambles the element order — exactly the garbled
// matrix shown in the output below. Map with an explicit RowMajor matrix
// type so the two layouts agree; Eigen converts to the column-major
// MatrixXf on assignment.
Eigen::MatrixXf xarray_to_matrixXf(xt::xarray<float> arr)
{
    auto shape = arr.shape();  // assumes arr is 2-D — TODO confirm at call sites
    int nrows = static_cast<int>(shape[0]);
    int ncols = static_cast<int>(shape[1]);
    using RowMajorMatrixXf =
        Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
    Eigen::MatrixXf mat = Eigen::Map<RowMajorMatrixXf>(arr.data(), nrows, ncols);
    return mat;
}
// https://stackoverflow.com/a/29243033/7128154
// Converts an Eigen matrix to a nested std::vector, one inner vector per row.
// Fix: the original took a raw pointer from mat.row(i).data(). For Eigen's
// default column-major storage a row is NOT contiguous in memory, so copying
// cols() consecutive floats from that pointer walks down a column instead —
// which is why the printed vec below is wrong. Indexing element-wise with
// mat(i, j) is layout-agnostic and always correct.
std::vector<std::vector<float>> matrixXf2d_to_vector(Eigen::MatrixXf mat)
{
    std::vector<std::vector<float>> vec(mat.rows());
    for (int i = 0; i < mat.rows(); ++i)
    {
        vec[i].reserve(mat.cols());
        for (int j = 0; j < mat.cols(); ++j)
        {
            vec[i].push_back(mat(i, j));
        }
    }
    return vec;
}
// print a vector
// https://stackoverflow.com/a/31130991/7128154
// Streams a vector as "[a, b, c]"; an empty vector prints as "[]".
template<typename T1>
std::ostream& operator <<( std::ostream& out, const std::vector<T1>& object )
{
    out << "[";
    // Separator trick: empty before the first element, ", " afterwards.
    const char* sep = "";
    for (const T1& item : object)
    {
        out << sep << item;
        sep = ", ";
    }
    out << "]";
    return out;
}
int main()
{
xt::xarray<float> xArr {{nan(""), 9}, {5, -6}, {1, 77}};
std::cout << "xt::xarray<float> xArr = \n" << xArr << std::endl;
Eigen::MatrixXf eigMat = xarray_to_matrixXf(xArr);
std::cout << "Eigen::MatrixXf eigMat = \n" << eigMat << std::endl;
std::vector<std::vector<float>> vec = matrixXf2d_to_vector(eigMat);
std::cout << "std::vector<std::vector<float>> vec = " << vec << std::endl;
return 0;
}
Output
xt::xarray<float> xArr =
{{nan., 9.},
{ 5., -6.},
{ 1., 77.}}
Eigen::MatrixXf eigMat =
nan -6
9 1
5 77
std::vector<std::vector<float>> vec = [[nan, 9], [9, 5], [5, -6]]

Related

Assimp does not correctly load multiple meshes from one obj file

The obj files I am trying to load have multiple -o flags, so there are multiple meshes. I am trying to load them into only 1 VAO, and I will draw them by first recording each mesh's offset and size. I have noted that the offset and size are in terms of number of vertices instead of faces, so they are multiplied by 3. For example, the first mesh starts at offset 0, and its size is mesh1's mNumberFaces * 3, and the second mesh starts at offset mesh1's mNumberFaces * 3, and its size is mesh2's mNumberFaces * 3. However, it seems only the first mesh is drawn correctly, and the rest of the meshes are all distorted somehow.
This is my loading logic:
// NOTE(review): this fragment concatenates every mesh of the scene into
// shared vertex/index arrays, but pushes each face's indices unmodified.
// Assimp face indices are local to their own mesh, so for every mesh after
// the first they must be rebased by the number of vertices already stored
// (e.g. base + face.mIndices[k]) — this missing rebase matches the
// "only the first mesh is drawn correctly" symptom. TODO confirm.
Object* obj = new Object(objName);
// Initialize the meshes in the obj file one by one
std::vector<glm::vec3> vert, norm;
std::vector<glm::vec2> text;
std::vector<glm::ivec3> indices;
// Running start position of the current mesh in the shared element buffer,
// counted in indices (faces * 3), not in faces or vertices.
int vertexOffset = 0;
std::cout << objName << " numMeshes: " << pScene->mNumMeshes << std::endl;
for (unsigned int i = 0; i < pScene->mNumMeshes; i++) {
std::cout << objName << ": vOffset " << vertexOffset << " numV " << pScene->mMeshes[i]->mNumFaces * 3 << std::endl;
aiMesh* pMesh = pScene->mMeshes[i];
// Fallback texture coordinate for meshes without UVs.
aiVector3D Zero3D(0.0f, 0.0f, 0.0f);
// Append this mesh's vertex attributes to the shared arrays.
for (unsigned int j = 0; j < pMesh->mNumVertices; j++) {
vert.push_back(glm::vec3(pMesh->mVertices[j].x, pMesh->mVertices[j].y, pMesh->mVertices[j].z));
norm.push_back(glm::vec3(pMesh->mNormals[j].x, pMesh->mNormals[j].y, pMesh->mNormals[j].z));
aiVector3D textCoord = pMesh->HasTextureCoords(0) ? pMesh->mTextureCoords[0][j] : Zero3D;
text.push_back(glm::vec2(textCoord.x, textCoord.y));
}
// Append faces. These mIndices are mesh-local, yet the vertex arrays
// above are shared across all meshes — see NOTE(review) at the top.
for (unsigned int j = 0; j < pMesh->mNumFaces; j++) {
aiFace face = pMesh->mFaces[j];
indices.push_back(glm::ivec3(face.mIndices[0], face.mIndices[1], face.mIndices[2]));
}
aiMaterial* mtl = pScene->mMaterials[pMesh->mMaterialIndex];
std::string meshName = std::string(pMesh->mName.C_Str());
// Record index offset/count so each sub-mesh can be drawn separately later.
Mesh* mesh = new Mesh(meshName, loadMaterial(mtl), vertexOffset, pMesh->mNumFaces * 3);
obj->meshList.push_back(mesh);
vertexOffset = vertexOffset + 3 * pMesh->mNumFaces;
}
//create the obj's node structure
//obj->root = processNode(pScene->mRootNode, obj->meshList);
//send the data to the gpu
GLuint vao;
GLuint vbo[3];
GLuint ebo;
glcheck(glGenVertexArrays(1, &vao));
glcheck(glBindVertexArray(vao));
glcheck(glGenBuffers(3, vbo));
// vbo[0]: positions at attribute location 0.
glcheck(glBindBuffer(GL_ARRAY_BUFFER, vbo[0]));
glcheck(glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * vert.size(), vert.data(), GL_STATIC_DRAW));
glcheck(glEnableVertexAttribArray(0));
glcheck(glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0));
// vbo[1]: normals at attribute location 1.
glcheck(glBindBuffer(GL_ARRAY_BUFFER, vbo[1]));
glcheck(glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * norm.size(), norm.data(), GL_STATIC_DRAW));
glcheck(glEnableVertexAttribArray(1));
glcheck(glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0));
// vbo[2]: texture coordinates at attribute location 2.
glcheck(glBindBuffer(GL_ARRAY_BUFFER, vbo[2]));
glcheck(glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2) * text.size(), text.data(), GL_STATIC_DRAW));
glcheck(glEnableVertexAttribArray(2));
glcheck(glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, 0));
// Element buffer shared by all sub-meshes of this object.
glcheck(glGenBuffers(1, &ebo));
glcheck(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo));
glcheck(glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(glm::ivec3) * indices.size(), indices.data(), GL_STATIC_DRAW));
// Unbind the VBO/VAO
glcheck(glBindVertexArray(0));
//glcheck(glBindBuffer(GL_ARRAY_BUFFER, 0));
//glcheck(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0));
obj->vao = vao; //shared vao variable
objMap[objName] = obj;
objList.push_back(obj);
return obj;
This is my drawing logic:
// Draw each sub-mesh with its recorded offset/count into the shared EBO.
// The byte offset is in GLuint indices; size is the mesh's mNumFaces * 3.
for (int i = 0; i < instObj->meshList.size(); i++) {
Mesh* mesh = instObj->meshList[i];
glcheck(glDrawElements(GL_TRIANGLES, mesh->size, GL_UNSIGNED_INT, (GLvoid*)(sizeof(GLuint) * mesh->vertexOffset)));
}
This is the first mesh, which is drawn correctly first mesh
The second mesh and onward are all messed up however, second mesh
The complete mesh enter image description here

How do I traverse a Tensorflow graph using the C API?

A small program below creates a simple tf graph. I need to traverse the graph, printing information about the nodes as I go.
Is it right to assume that every graph has a root (or distinguished node)? I believe this graph has 3 nodes and I've heard that the edges are tensors.
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include"tensorflow/c/c_api.h"
// Global graph/status handles shared by the helper functions below.
TF_Graph* g;
TF_Status* s;
// Bail out of the enclosing pointer-returning function when the status holds
// an error, printing the message first (comma operator yields (void*)0).
// Fixed: the macro previously ignored its argument and always read the
// global `s`; it now checks the status that was actually passed in. All
// existing call sites pass `s`, so behavior is unchanged.
#define CHECK_OK(x) if(TF_OK != TF_GetCode(x))return printf("%s\n",TF_Message(x)),(void*)0
// Allocates a 2x2 TF_FLOAT tensor and copies the four floats from `values`
// into its buffer. The caller owns the returned tensor.
TF_Tensor* FloatTensor2x2(const float* values) {
const int64_t dims[2] = {2, 2};
// Payload length: 4 floats = 16 bytes for the 2x2 shape.
TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, sizeof(float) * 4);
memcpy(TF_TensorData(t), values, sizeof(float) * 4);
return t;
}
// Adds a 2x2 float Const operation named `name` to `graph`.
// On failure returns 0 (explicit check) or prints via CHECK_OK; either way
// the error detail stays in `s` for the caller to inspect.
TF_Operation* FloatConst2x2(TF_Graph* graph, TF_Status* s, const float* values, const char* name) {
TF_Tensor* tensor=FloatTensor2x2(values);
TF_OperationDescription* desc = TF_NewOperation(graph, "Const", name);
TF_SetAttrTensor(desc, "value", tensor, s);
// NOTE(review): explicit status check here but CHECK_OK below — two error
// styles in one function; consider unifying.
if (TF_GetCode(s) != TF_OK) return 0;
TF_SetAttrType(desc, "dtype", TF_FLOAT);
TF_Operation* op = TF_FinishOperation(desc, s);
CHECK_OK(s);
return op;
}
// Adds a MatMul operation multiplying output 0 of `l` by output 0 of `r`.
// transpose_a / transpose_b set the corresponding MatMul attributes when
// non-zero; returns the finished operation (or bails via CHECK_OK).
TF_Operation* MatMul(TF_Graph* graph, TF_Status* s, TF_Operation* l, TF_Operation* r, const char* name,
char transpose_a, char transpose_b) {
TF_OperationDescription* desc = TF_NewOperation(graph, "MatMul", name);
if (transpose_a) {
TF_SetAttrBool(desc, "transpose_a", 1);
}
if (transpose_b) {
TF_SetAttrBool(desc, "transpose_b", 1);
}
// Each input is (operation, output index 0).
TF_AddInput(desc,(TF_Output){l, 0});
TF_AddInput(desc,(TF_Output){r, 0});
TF_Operation* op = TF_FinishOperation(desc, s);
CHECK_OK(s);
return op;
}
// Builds Const_0 x Const_1 -> MatMul in the global graph `g`, filling
// `inputs` with the two constant outputs and `outputs[0]` with the matmul
// result. Returns the global graph.
// NOTE(review): uses globals g and s rather than taking the graph/status as
// parameters — fine for this demo, but it limits reuse.
TF_Graph* BuildSuccessGraph(TF_Output* inputs, TF_Output* outputs) {
// |
// z|
// |
// MatMul
// / \
// ^ ^
// | |
// x Const_0 y Const_1
//
float const0_val[] = {1.0, 2.0, 3.0, 4.0};
float const1_val[] = {1.0, 0.0, 0.0, 1.0};
TF_Operation* const0 = FloatConst2x2(g, s, const0_val, "Const_0");
TF_Operation* const1 = FloatConst2x2(g, s, const1_val, "Const_1");
TF_Operation* matmul = MatMul(g, s, const0, const1, "MatMul",0,0);
inputs[0] = (TF_Output){const0, 0};
inputs[1] = (TF_Output){const1, 0};
outputs[0] = (TF_Output){matmul, 0};
CHECK_OK(s);
return g;
}
// Builds the demo graph, then (eventually) traverses it. Fixed: the original
// leaked the graph and status handles; release them before exiting and
// return an explicit success code.
int main(int argc, char const *argv[]) {
g = TF_NewGraph();
s = TF_NewStatus();
TF_Output inputs[2],outputs[1];
BuildSuccessGraph(inputs,outputs);
/* HERE traverse g -- maybe with {inputs,outputs} -- to print the graph */
fprintf(stdout, "OK\n");
// Release TensorFlow-owned resources (previously leaked).
TF_DeleteGraph(g);
TF_DeleteStatus(s);
return 0;
}
If someone could help with what functions to use to get info about the graph, it would be appreciated.
from c_api.h:
// Iterate through the operations of a graph. To use:
// size_t pos = 0;
// TF_Operation* oper;
// while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) {
// DoSomethingWithOperation(oper);
// }
TF_CAPI_EXPORT extern TF_Operation* TF_GraphNextOperation(TF_Graph* graph,
size_t* pos);
Note this only returns operations and does not define a way to navigate from one node (Operation) to the next - this edge relationship is stored in the nodes themselves (as pointers).

Tensorflow tfcompile: fetching gradients

I created a very simple tensorflow model where I fetch gradients:
# tf Graph Input
# Placeholders for a single sample: 1x2 input X and 1x2 target Y.
X = tf.placeholder(tf.float32, [1, 2], name="X")
Y = tf.placeholder(tf.float32, [1, 2], name="Y")
# Model parameter variables
W = tf.Variable([[1.0, 2.0], [3.0, 4.0]], name="weight")
B = tf.Variable([[5.0, 6.0]], name="bias")
# Construct a multivariate linear model
matmul = tf.matmul(X, W, name="matrixMul")
pred = tf.add(matmul, B, name="addition")
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2) / 2 )
# Fetch gradients
# Gradients of the cost w.r.t. the parameters; these gradient nodes are what
# the tfcompile config fetches by generated name.
grads = tf.gradients(cost, [W, B])
I exported this graph into a protobuf and now I use tfcompile for AOT compilation. I want to use the compiled graph in a C++ program and fetch the computed gradients.
The config file for tfcompile looks like:
feed {
id { node_name: "X" }
shape {
dim { size: 1 }
dim { size: 2 }
}
name: "x"
}
feed {
id { node_name: "Y" }
shape {
dim { size: 1 }
dim { size: 2 }
}
name: "y"
}
feed {
id { node_name: "weight" }
shape {
dim { size: 2 }
dim { size: 2 }
}
name: "w"
}
feed {
id { node_name: "bias" }
shape {
dim { size: 1 }
dim { size: 2 }
}
name: "b"
}
fetch {
id { node_name: "addition"}
name: "prediction"
}
fetch {
id { node_name: "gradients/matrixMul_grad/MatMul_1"}
name: "weight_grad"
}
fetch {
id { node_name: "gradients/addition_grad/Reshape"}
name: "bias_grad"
}
Finally I run this C++ code:
obj.set_arg_x_data(x.data());
obj.set_arg_y_data(y.data());
obj.set_arg_w_data(w.data());
obj.set_arg_b_data(b.data());
obj.Run();
std::cout << "result_prediction =" << std::endl << obj.result_prediction(0,0) << " " << obj.result_prediction(0,1) << std::endl;
std::cout << "result_weight_grad =" << std::endl << obj.result_weight_grad(0,0) << " " << obj.result_weight_grad(0,1) << " " << obj.result_weight_grad(1,0) << " " << obj.result_weight_grad(1,1) << std::endl;
std::cout << "result_bias_grad =" << std::endl << obj.result_bias_grad(0,0) << " " << obj.result_bias_grad(0,1) << std::endl;
For result_prediction and result_bias_grad I get the expected values.
Just for result_weight_grad I get only 0,0,0,0.
Maybe I am fetching there the wrong node:
fetch {
id { node_name: "gradients/matrixMul_grad/MatMul_1"}
name: "weight_grad"
}
Has anybody already tried to fetch computed gradients? TensorFlow only offers examples where tfcompile is used for prediction.

mxnet (mshadow) getting the shape of a tensor

I'm a newbie in mshadow, I can not understand why I got those outpus from the following code snippet:
// Allocate a 2x3 float tensor on the CPU and fill it with ones.
TensorContainer<cpu, 2> lhs(Shape2(2, 3));
lhs = 1.0;
// Sizes of the 2-D container itself: prints "2 3".
printf("%u %u\n", lhs.size(0), lhs.size(1));
// lhs[0] is 1-D, so its shape_ has a single entry; indexing shape_[1] /
// size(1) reads past the shape — see the answer below for what it aliases.
printf("%u %u\n", lhs[0].shape_[0], lhs[0].shape_[1]);
printf("%u %u\n", lhs[0].size(0), lhs[0].size(1));
The output is:
2 3
3 4
3 3
Why are the second and third outputs those numbers? Because lhs[0] is one-dimensional, I think they should be exactly the same, i.e. 3 0. Could anyone tell me where I was wrong? Thanks in advance!
You are right, Tensor lhs[0] is one dimensional, but to answer you question first let me show what is going on under the hood. TensorContainer does not override the [] operator, instead it uses the one from the parent (which is Tensor), more precisely the following one is called:
// Indexing peels off the highest dimension: returns a new (kSubdim)-D Tensor
// on the stack that views the same memory, offset by idx sub-tensors and
// carrying the parent's sub-shape, stride and stream.
MSHADOW_XINLINE Tensor<Device, kSubdim, DType> operator[](index_t idx) const {
return Tensor<Device, kSubdim, DType>(dptr_ + this->MemSize<1>() * idx,
shape_.SubShape(), stride_, stream_);
}
As can be seen it creates a new Tensor on a stack. And while for the most of the cases it will create generic N-dimensional Tensor, here for the 1-dimensional case it will create a special 1-dimensional Tensor.
Now ,when we have established what exactly is returned by the operator [], let's look on the fields of that class:
DType *dptr_;     // raw data pointer (view, not owned)
Shape<1> shape_;  // exactly one dimension — shape_[1] is out of bounds
index_t stride_;  // declared right after shape_; the address printout below
                  // shows shape_[1] aliases this member
As can be seen, the shape_ here has only 1 dimension! So there is no shape_[1]; instead, accessing shape_[1] reads out of bounds and returns stride_ (or part of it). Here is a modification to the Tensor constructor that you can run to see what is actually going on:
// Instrumented 1-D Tensor constructor from the answer: logs how reading the
// out-of-bounds shape_[1] aliases the stride_ member that follows it.
MSHADOW_XINLINE Tensor(DType *dptr, Shape<1> shape,
index_t stride, Stream<Device> *stream)
: dptr_(dptr), shape_(shape), stride_(stride), stream_(stream) {
std::cout << "shape[0]: " << shape[0] << std::endl; // 3
std::cout << "shape[1]: " << shape[1] << std::endl; // 0, as expected
std::cout << "_shape[0]: " << shape_[0] << std::endl; // 3, as expected
std::cout << "_shape[1]: " << shape_[1] << std::endl; // garbage (4)
std::cout << "address of _shape[1]: " << &(shape_[1]) << std::endl;
std::cout << "address of stride: " << &(stride_) << std::endl;
}
and the output:
shape[0]: 3
shape[1]: 0
_shape[0]: 3
_shape[1]: 4
address of _shape[1]: 0x7fffa28ec44c
address of stride: 0x7fffa28ec44c
shape_[1] and stride_ both have the same address (0x7fffa28ec44c).

Comparison between 2D and 3D Affine transforms

Is it expected that the following test should fail?
The test compares results of a 2D and a 3D AffineTransformation. Both are constructed to have unit scaling and zero offsets in the y and z direction, but to have non-zero and non-unity scaling and offset in the x direction. All other off-diagonal elements are zero. It is my belief that these transformations are identical in the x and y directions, and hence should produce identical results.
Furthermore I have found that the test passes if I use this Kernel:
using K = CGAL::Exact_predicates_exact_constructions_kernel;
Is it to be expected that the test passes if I use this Kernel? Should the test fail with either kernel or pass with either kernel?
// Compares equivalent 2D and 3D CGAL affine transforms built from the same
// x-scale and x-offset (identity in y/z) and expects identical x results.
// NOTE(review): with the inexact-constructions kernel, inverse() and point
// transforms may round differently in 2D vs 3D, so EXPECT_EQ on FT values
// can legitimately fail here; the question reports it passes with the
// exact-constructions kernel — confirm which behavior is intended.
TEST(TransformerTest, testCGALAffine) {
using K = CGAL::Exact_predicates_inexact_constructions_kernel;
using Float = typename K::FT;
using Transformation_2 = K::Aff_transformation_2;
using Transformation_3 = K::Aff_transformation_3;
using Point_2 = typename K::Point_2;
using Point_3 = typename K::Point_3;
double lowerCorner(17.005142946538115);
double upperCorner(91.940521484752139);
int resolution = 48;
// Voxel size along x derived from the world extent.
double tmpScaleX((upperCorner - lowerCorner) / resolution);
Float scaleX(tmpScaleX);
Float zero(0);
Float unit(1);
// create a 2D voxel to world transform
Transformation_2 transformV2W_2(scaleX, zero, Float(lowerCorner),
zero, unit, zero,
unit);
// create its inverse: a 2D world to voxel transform
auto transformW2V_2 = transformV2W_2.inverse();
// create a 3D voxel to world transform
Transformation_3 transformV2W_3(scaleX, zero, zero, Float(lowerCorner),
zero, unit, zero, zero,
zero, zero, unit, zero,
unit);
// create its inverse: a 3D world to voxel transform
auto transformW2V_3 = transformV2W_3.inverse();
// The shared upper-left entries of both matrices should agree entry-wise.
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 2; ++j) {
EXPECT_EQ(transformV2W_2.cartesian(i, j), transformV2W_3.cartesian(i, j)) << i << ", " << j;
EXPECT_EQ(transformW2V_2.cartesian(i, j), transformW2V_3.cartesian(i, j)) << i << ", " << j;
}
}
// Fixed seed keeps the sampled points reproducible across runs.
std::mt19937_64 rng(0);
std::uniform_real_distribution<double> randReal(0, resolution);
// compare the results of 2D and 3D transformations of random locations
for (int i = 0; i < static_cast<int>(1e4); ++i) {
Float x(randReal(rng));
Float y(randReal(rng));
auto world_2 = transformV2W_2(Point_2(x, y));
auto world_3 = transformV2W_3(Point_3(x, y, 0));
EXPECT_EQ(world_2.x(), world_3.x()) << world_2 << ", " << world_3;
auto voxel_2 = transformW2V_2(world_2);
auto voxel_3 = transformW2V_3(world_3);
EXPECT_EQ(voxel_2.x(), voxel_3.x()) << voxel_2 << ", " << voxel_3;
}
}