Imlib2 error when cross-compiling for ARM target - embedded

I am trying to port X11 to an ARM processor, and I am using the Imlib2 library to display JPEG images. I have successfully cross-compiled Imlib2 together with X Windows for ARM. My sample program also builds successfully, but when I run the binary the JPEG is not displayed properly (it reports an image load error).
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <Imlib2.h>

int main(int argc, char **argv)
{
    Imlib_Image img;
    Display *dpy;
    Pixmap pix;
    Window root;
    Screen *scn;
    int width, height;
    const char *filename = NULL;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <image_file>\n", argv[0]);
        return 1;
    }
    filename = argv[1];

    img = imlib_load_image(filename);
    printf("img pointer %p\n", (void *)img);
    if (!img) {
        fprintf(stderr, "%s: unable to load image\n", filename);
        return 1;
    }
    imlib_context_set_image(img);
    width = imlib_image_get_width();
    height = imlib_image_get_height();

    dpy = XOpenDisplay(NULL);
    if (!dpy)
        return 1;
    scn = DefaultScreenOfDisplay(dpy);
    root = DefaultRootWindow(dpy);
    pix = XCreatePixmap(dpy, root, width, height,
                        DefaultDepthOfScreen(scn));
    printf("call\n");

    imlib_context_set_display(dpy);
    imlib_context_set_visual(DefaultVisualOfScreen(scn));
    imlib_context_set_colormap(DefaultColormapOfScreen(scn));
    imlib_context_set_drawable(pix);
    imlib_render_image_on_drawable(0, 0);

    Window w = XCreateSimpleWindow(dpy, root, 0, 0, width, height, 0, None, None);
    XSelectInput(dpy, w, ExposureMask);
    XMapWindow(dpy, w);
    //XCopyPlane(dpy, pix, w, gc, 0, 0, width, height, 0, 0, DefaultDepthOfScreen(scn));
    XSetWindowBackgroundPixmap(dpy, w, pix);
    XClearWindow(dpy, w);   /* clear the new window, not the root window */
    XFlush(dpy);

    /* Wait until the window is exposed, then keep it on screen a while. */
    XEvent ev;
    do {
        XNextEvent(dpy, &ev);
        if (ev.type == Expose) {
            //XCopyPlane(dpy, pix, w, gc, 0, 0, width, height, 0, 0, DefaultDepthOfScreen(scn));
            printf("Expose called\n");
        }
    } while (ev.type != Expose);
    sleep(20);

    XFreePixmap(dpy, pix);
    imlib_free_image();
    XCloseDisplay(dpy);
    return 0;
}
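Since the failure happens at load time, Imlib2 can report why. Below is a minimal diagnostic sketch using imlib_load_image_with_error_return() from the classic Imlib2 API (the enum value is from Imlib2.h). Note that Imlib2 loads its format codecs as dynamic loader modules, so a JPEG loader that was not cross-compiled and installed on the target is a plausible cause:

/* Diagnose the load failure instead of only checking for NULL. */
Imlib_Load_Error err;
Imlib_Image img = imlib_load_image_with_error_return(filename, &err);
if (!img) {
    if (err == IMLIB_LOAD_ERROR_NO_LOADER_FOR_FILE_FORMAT)
        fprintf(stderr, "%s: no loader for this format - is the jpeg loader "
                        "module installed on the target?\n", filename);
    else
        fprintf(stderr, "%s: load failed, Imlib_Load_Error=%d\n",
                filename, (int)err);
    return 1;
}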

Related

Assimp does not correctly load multiple meshes from one obj file

The obj files I am trying to load contain multiple "o" (object) statements, so there are multiple meshes. I am trying to load them into only one VAO, and I will draw them by recording each mesh's offset and size first. Note that the offset and size are counted in vertices rather than faces, so they are multiplied by 3. For example, the first mesh starts at offset 0 and its size is mesh 1's mNumFaces * 3, and the second mesh starts at offset mesh 1's mNumFaces * 3 and its size is mesh 2's mNumFaces * 3. However, only the first mesh is drawn correctly; the rest of the meshes are all distorted somehow.
This is my loading logic:
Object* obj = new Object(objName);
// Initialize the meshes in the obj file one by one
std::vector<glm::vec3> vert, norm;
std::vector<glm::vec2> text;
std::vector<glm::ivec3> indices;
int vertexOffset = 0;
std::cout << objName << " numMeshes: " << pScene->mNumMeshes << std::endl;
for (unsigned int i = 0; i < pScene->mNumMeshes; i++) {
    std::cout << objName << ": vOffset " << vertexOffset << " numV " << pScene->mMeshes[i]->mNumFaces * 3 << std::endl;
    aiMesh* pMesh = pScene->mMeshes[i];
    aiVector3D Zero3D(0.0f, 0.0f, 0.0f);
    for (unsigned int j = 0; j < pMesh->mNumVertices; j++) {
        vert.push_back(glm::vec3(pMesh->mVertices[j].x, pMesh->mVertices[j].y, pMesh->mVertices[j].z));
        norm.push_back(glm::vec3(pMesh->mNormals[j].x, pMesh->mNormals[j].y, pMesh->mNormals[j].z));
        aiVector3D textCoord = pMesh->HasTextureCoords(0) ? pMesh->mTextureCoords[0][j] : Zero3D;
        text.push_back(glm::vec2(textCoord.x, textCoord.y));
    }
    for (unsigned int j = 0; j < pMesh->mNumFaces; j++) {
        aiFace face = pMesh->mFaces[j];
        indices.push_back(glm::ivec3(face.mIndices[0], face.mIndices[1], face.mIndices[2]));
    }
    aiMaterial* mtl = pScene->mMaterials[pMesh->mMaterialIndex];
    std::string meshName = std::string(pMesh->mName.C_Str());
    Mesh* mesh = new Mesh(meshName, loadMaterial(mtl), vertexOffset, pMesh->mNumFaces * 3);
    obj->meshList.push_back(mesh);
    vertexOffset = vertexOffset + 3 * pMesh->mNumFaces;
}
// Create the obj's node structure
//obj->root = processNode(pScene->mRootNode, obj->meshList);
// Send the data to the GPU
GLuint vao;
GLuint vbo[3];
GLuint ebo;
glcheck(glGenVertexArrays(1, &vao));
glcheck(glBindVertexArray(vao));
glcheck(glGenBuffers(3, vbo));
glcheck(glBindBuffer(GL_ARRAY_BUFFER, vbo[0]));
glcheck(glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * vert.size(), vert.data(), GL_STATIC_DRAW));
glcheck(glEnableVertexAttribArray(0));
glcheck(glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0));
glcheck(glBindBuffer(GL_ARRAY_BUFFER, vbo[1]));
glcheck(glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * norm.size(), norm.data(), GL_STATIC_DRAW));
glcheck(glEnableVertexAttribArray(1));
glcheck(glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0));
glcheck(glBindBuffer(GL_ARRAY_BUFFER, vbo[2]));
glcheck(glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2) * text.size(), text.data(), GL_STATIC_DRAW));
glcheck(glEnableVertexAttribArray(2));
glcheck(glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, 0));
glcheck(glGenBuffers(1, &ebo));
glcheck(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo));
glcheck(glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(glm::ivec3) * indices.size(), indices.data(), GL_STATIC_DRAW));
// Unbind the VAO (the GL_ELEMENT_ARRAY_BUFFER binding is part of the VAO
// state, so it must stay bound while the VAO is bound)
glcheck(glBindVertexArray(0));
//glcheck(glBindBuffer(GL_ARRAY_BUFFER, 0));
//glcheck(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0));
obj->vao = vao; // shared vao variable
objMap[objName] = obj;
objList.push_back(obj);
return obj;
This is my drawing logic:
for (int i = 0; i < instObj->meshList.size(); i++) {
    Mesh* mesh = instObj->meshList[i];
    glcheck(glDrawElements(GL_TRIANGLES, mesh->size, GL_UNSIGNED_INT, (GLvoid*)(sizeof(GLuint) * mesh->vertexOffset)));
}
This is the first mesh, which is drawn correctly: [screenshot: first mesh]
The second mesh and onward are all messed up, however: [screenshot: second mesh]
The complete mesh: [screenshot: complete mesh]
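For context on how per-mesh indices interact with a shared vertex buffer: Assimp's face indices are local to each mesh, so when several meshes are concatenated into one VBO/EBO, each mesh's indices are usually rebased by the number of vertices already appended. A minimal sketch of that rebasing, reusing the variables from the loading snippet above (the baseVertex name is mine):

// Sketch: rebase each mesh's local face indices into the shared buffer.
// baseVertex counts vertices (not faces * 3) appended from earlier meshes.
unsigned int baseVertex = 0;
for (unsigned int i = 0; i < pScene->mNumMeshes; i++) {
    aiMesh* pMesh = pScene->mMeshes[i];
    for (unsigned int j = 0; j < pMesh->mNumFaces; j++) {
        const aiFace& face = pMesh->mFaces[j];
        // Offset the mesh-local indices by the vertices already in the buffer.
        indices.push_back(glm::ivec3(face.mIndices[0] + baseVertex,
                                     face.mIndices[1] + baseVertex,
                                     face.mIndices[2] + baseVertex));
    }
    baseVertex += pMesh->mNumVertices;
}

Note the distinction: the draw offset and size are in indices (mNumFaces * 3), while the rebase amount is the mesh's vertex count (mNumVertices).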

Vulkan line drawing not showing up

So I would like to render the bounding box of a selected object. I have one buffer storing the 8 corner points and another buffer storing the indices used to draw those points as a line strip forming a box. I captured a frame with RenderDoc and I can see the yellow bounding box: [screenshot: bounding box]
So it looks like the values are correct and the box is being drawn, but I do not see it in the final render to the screen. Anyone have an idea what I might be missing here?
VkDescriptorSetLayoutBinding cameraBind = vkinit::descriptorset_layout_binding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT, 0);
VkDescriptorSetLayoutCreateInfo setinfo = {};
setinfo.bindingCount = 1;
setinfo.flags = 0;
setinfo.pNext = nullptr;
setinfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
setinfo.pBindings = &cameraBind;
vkCreateDescriptorSetLayout(_device, &setinfo, nullptr, &_globalSetLayout);
//setup push constant
VkPushConstantRange push_constant;
push_constant.offset = 0;
push_constant.size = sizeof(glm::mat4);
push_constant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkPipelineLayoutCreateInfo mesh_pipeline_layout_info = vkinit::pipeline_layout_create_info();
mesh_pipeline_layout_info.setLayoutCount = 1;
mesh_pipeline_layout_info.pSetLayouts = &_globalSetLayout;
mesh_pipeline_layout_info.pPushConstantRanges = &push_constant;
mesh_pipeline_layout_info.pushConstantRangeCount = 1;
vkCreatePipelineLayout(_device, &mesh_pipeline_layout_info, nullptr, &_highlightPipelineLayout);
PipelineBuilder highlightPipelineBuilder;
VertexInputDescription description;
VkVertexInputBindingDescription mainBinding = {};
mainBinding.binding = 0;
mainBinding.stride = sizeof(glm::vec3);
mainBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
description.bindings.push_back(mainBinding);
//Position will be stored at Location 0
VkVertexInputAttributeDescription positionAttribute = {};
positionAttribute.binding = 0;
positionAttribute.location = 0;
positionAttribute.format = VK_FORMAT_R32G32B32_SFLOAT;
positionAttribute.offset = 0;
description.attributes.push_back(positionAttribute);
highlightPipelineBuilder._pipelineLayout = _highlightPipelineLayout;
highlightPipelineBuilder.vertexDescription = description;
highlightPipelineBuilder._inputAssembly = vkinit::input_assembly_create_info(VK_PRIMITIVE_TOPOLOGY_LINE_STRIP);
highlightPipelineBuilder._rasterizer = vkinit::rasterization_state_create_info(VK_POLYGON_MODE_LINE);
highlightPipelineBuilder._depthStencil = vkinit::depth_stencil_create_info(true, true, VK_COMPARE_OP_ALWAYS);
ShaderEffect* lineEffect = new ShaderEffect();
lineEffect->add_stage(_shaderCache.get_shader(shader_path("wk_highlight.vert.spv")), VK_SHADER_STAGE_VERTEX_BIT);
lineEffect->add_stage(_shaderCache.get_shader(shader_path("wk_highlight.frag.spv")), VK_SHADER_STAGE_FRAGMENT_BIT);
lineEffect->reflect_layout(_device, nullptr, 0);
highlightPipelineBuilder.setShaders(lineEffect);
_highlightPipeline = highlightPipelineBuilder.build_pipeline(_device, _renderPass);
And here is the drawing part:
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, _highlightPipeline);
uint32_t camera_data_offset = _dynamicData.push(_camera.matrices);
VkDescriptorBufferInfo camInfo = _dynamicData.source.get_info();
camInfo.range = sizeof(GPUCameraData);
camInfo.offset = camera_data_offset;
VkDescriptorSet cameraSet;
vkutil::DescriptorBuilder::begin(_descriptorLayoutCache, _dynamicDescriptorAllocator)
    .bind_buffer(0, &camInfo, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
    .build(cameraSet);
vkCmdPushConstants(cmd, _highlightPipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(glm::mat4), &_selectedMatrix);
VkDeviceSize offset = 0;
vkCmdBindVertexBuffers(cmd, 0, 1, &_highlightVertexBuffer._buffer, &offset);
vkCmdBindIndexBuffer(cmd, _highlightIndexBuffer._buffer, 0, VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, _highlightPipelineLayout, 0, 1, &cameraSet, 0, nullptr);
vkCmdDrawIndexed(cmd, 16, 1, 0, 0, 0);
And the shaders. Vertex shader:
#version 450

layout (location = 0) in vec3 vPosition;

layout(set = 0, binding = 0) uniform CameraBuffer {
    mat4 view;
    mat4 proj;
    mat4 viewproj;
} cameraData;

layout(push_constant) uniform PushConstants {
    mat4 matrix;
} pushConstants;

void main()
{
    mat4 modelMatrix = pushConstants.matrix;
    mat4 transformMatrix = (cameraData.viewproj * modelMatrix);
    gl_Position = transformMatrix * vec4(vPosition, 1.0f);
}
Fragment shader:
//glsl version 4.5
#version 450

//output write
layout (location = 0) out vec4 outFragColor;

void main()
{
    outFragColor = vec4(1.0, 1.0, 0.0, 1.0);
}
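For reference, the draw call above consumes 16 indices in line-strip topology, i.e. 15 segments. One common ordering (an assumption for illustration, not taken from the buffers above) that covers all 12 box edges with 16 indices, where corners 0-3 form the bottom face and 4-7 sit directly above them:

// Hypothetical 16-index line strip covering all 12 edges of a box.
// Corners 0-3: bottom face; corners 4-7: top face (i and i+4 are vertical pairs).
const uint16_t boxStrip[16] = {
    0, 1, 2, 3, 0,        // bottom loop, ending back at corner 0
    4, 5, 6, 7, 4,        // up the 0-4 edge, then the top loop
    5, 1, 2, 6, 7, 3      // retrace short runs to pick up the remaining verticals
};

Since RenderDoc already shows the full box, the index data is probably fine; this just documents what the 16 in vkCmdDrawIndexed corresponds to.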

glReadPixels returns wrong values on a different screen

So I have a plugin that behaves differently on different machines. On my machine, I pass in an image I generate with 32 * 32 * 32 values, where within every run of 32 pixels the red value goes from 0 to 1. I read it back correctly: within every 32 pixels, the red value goes from 0 to 1.
But with the same code and the same program on my co-worker's machine, the red value goes from 0 to 1 over 64 pixels, not 32.
Is this problem related to screen resolution? How can I get back the correct values that I passed in?
Example (the first 32 pixels' red values):
Passed in:
0, 8, 16, 24, 32, 40, ... 248
Correct (my machine):
0, 8, 16, 24, ... 248
Wrong (co-worker's machine):
0, 4, 8, 12, 16, ... 124
Here is my code to create the texture based on the bitmap:
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_RECTANGLE_ARB);
if (_texID) {
    glDeleteTextures(1, &_texID);
    _texID = 0;
}
glGenTextures(1, &_texID);
// Bind the newly generated texture before setting its parameters
// (the original code bound the old, about-to-be-deleted name).
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, _texID);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA8, (int)_imageSize.width, (int)_imageSize.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, _image_buffer);
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
Here is the code to generate the bitmap:
[self setFrameSize:_imageSize];
GLubyte al = 255; // alpha value
double increment = (al + 1) / val; // 256 / 32 = 8
_totalByteSize = val * val * val * 4;
_image_buffer = new GLubyte[_totalByteSize];
for (int i = 0; i < _totalByteSize; i++) {
    _image_buffer[i] = 0;
}
vector<GLKVector3> data;
GLubyte bl = 0;
GLubyte gr = 0;
GLubyte re = 0;
for (int b = 0; b < val; b++) {
    gr = 0;
    for (int g = 0; g < val; g++) {
        re = 0;
        for (int r = 0; r < val; r++) {
            int ind = r + g * val + b * val * val;
            _image_buffer[ind * 4 + 0] = re;
            _image_buffer[ind * 4 + 1] = gr;
            _image_buffer[ind * 4 + 2] = bl;
            _image_buffer[ind * 4 + 3] = al;
            re += increment;
        }
        gr += increment;
    }
    bl += increment;
}
And here is the code to read it back:
int totalByteSize = 32 * 32 * 32 * 3;
GLubyte* bitmap = new GLubyte[totalByteSize];
glReadPixels(0, 0, _imageSize.width, _imageSize.height, GL_RGB, GL_UNSIGNED_BYTE, bitmap);
[_lutData removeAllObjects];
for (int i = 0; i < totalByteSize / 3; i++) { // i <= ... would read one pixel past the end
    double val1 = (double)bitmap[i * 3 + 0] / 256.0;
    double val2 = (double)bitmap[i * 3 + 1] / 256.0;
    double val3 = (double)bitmap[i * 3 + 2] / 256.0;
    [_lutData addObject:@[@(val1), @(val2), @(val3)]];
}
Can this be caused by a high-resolution screen or a different display setting making it read back the wrong values?
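One thing worth checking (a diagnostic sketch, not taken from the plugin code; it assumes the GL context and _imageSize from the snippets above): on a HiDPI/Retina display the framebuffer can be larger in pixels than the view size in points, and glReadPixels works in pixels. The step-4, half-range values above look like GL_LINEAR sampling of a 2x-scaled image, which would be consistent with that. Comparing the actual viewport size with the assumed image size shows whether the scaling is in play:

// Query the size of the region the GL context is actually rendering into.
GLint vp[4];
glGetIntegerv(GL_VIEWPORT, vp); // vp[2] = width, vp[3] = height, in pixels

printf("viewport %d x %d, assumed image %d x %d\n",
       vp[2], vp[3], (int)_imageSize.width, (int)_imageSize.height);

// If the viewport is twice the assumed size, the texture is drawn scaled up
// and, with GL_LINEAR filtering, read back as interpolated values - which
// would match the 0, 4, 8, 12, ... numbers observed above.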

How do I traverse a Tensorflow graph using the C API?

A small program below creates a simple tf graph. I need to traverse the graph, printing information about the nodes as I go.
Is it right to assume that every graph has a root (or distinguished node)? I believe this graph has 3 nodes and I've heard that the edges are tensors.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "tensorflow/c/c_api.h"

TF_Graph* g;
TF_Status* s;

#define CHECK_OK(x) if (TF_OK != TF_GetCode(s)) return printf("%s\n", TF_Message(s)), (void*)0

TF_Tensor* FloatTensor2x2(const float* values) {
    const int64_t dims[2] = {2, 2};
    TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, sizeof(float) * 4);
    memcpy(TF_TensorData(t), values, sizeof(float) * 4);
    return t;
}

TF_Operation* FloatConst2x2(TF_Graph* graph, TF_Status* s, const float* values, const char* name) {
    TF_Tensor* tensor = FloatTensor2x2(values);
    TF_OperationDescription* desc = TF_NewOperation(graph, "Const", name);
    TF_SetAttrTensor(desc, "value", tensor, s);
    if (TF_GetCode(s) != TF_OK) return 0;
    TF_SetAttrType(desc, "dtype", TF_FLOAT);
    TF_Operation* op = TF_FinishOperation(desc, s);
    CHECK_OK(s);
    return op;
}

TF_Operation* MatMul(TF_Graph* graph, TF_Status* s, TF_Operation* l, TF_Operation* r, const char* name,
                     char transpose_a, char transpose_b) {
    TF_OperationDescription* desc = TF_NewOperation(graph, "MatMul", name);
    if (transpose_a) {
        TF_SetAttrBool(desc, "transpose_a", 1);
    }
    if (transpose_b) {
        TF_SetAttrBool(desc, "transpose_b", 1);
    }
    TF_AddInput(desc, (TF_Output){l, 0});
    TF_AddInput(desc, (TF_Output){r, 0});
    TF_Operation* op = TF_FinishOperation(desc, s);
    CHECK_OK(s);
    return op;
}

TF_Graph* BuildSuccessGraph(TF_Output* inputs, TF_Output* outputs) {
    //             z
    //             |
    //           MatMul
    //          /      \
    //         ^        ^
    //         |        |
    //    x Const_0   y Const_1
    //
    float const0_val[] = {1.0, 2.0, 3.0, 4.0};
    float const1_val[] = {1.0, 0.0, 0.0, 1.0};
    TF_Operation* const0 = FloatConst2x2(g, s, const0_val, "Const_0");
    TF_Operation* const1 = FloatConst2x2(g, s, const1_val, "Const_1");
    TF_Operation* matmul = MatMul(g, s, const0, const1, "MatMul", 0, 0);
    inputs[0] = (TF_Output){const0, 0};
    inputs[1] = (TF_Output){const1, 0};
    outputs[0] = (TF_Output){matmul, 0};
    CHECK_OK(s);
    return g;
}

int main(int argc, char const *argv[]) {
    g = TF_NewGraph();
    s = TF_NewStatus();
    TF_Output inputs[2], outputs[1];
    BuildSuccessGraph(inputs, outputs);
    /* HERE traverse g -- maybe with {inputs,outputs} -- to print the graph */
    fprintf(stdout, "OK\n");
    return 0;
}
If someone could help with what functions to use to get info about the graph, it would be appreciated.
from c_api.h:
// Iterate through the operations of a graph. To use:
// size_t pos = 0;
// TF_Operation* oper;
// while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) {
// DoSomethingWithOperation(oper);
// }
TF_CAPI_EXPORT extern TF_Operation* TF_GraphNextOperation(TF_Graph* graph,
size_t* pos);
Note this only returns operations and does not define a way to navigate from one node (Operation) to the next - this edge relationship is stored in the nodes themselves (as pointers).
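For the traversal itself, c_api.h also exposes per-operation introspection: TF_OperationName, TF_OperationOpType, TF_OperationNumInputs/TF_OperationNumOutputs, and TF_OperationInput, which returns the producer of a given input edge. A sketch combining these with TF_GraphNextOperation (error handling omitted):

/* Sketch: walk every operation in the graph and print its name, type,
   and, for each input, which operation/output feeds it. */
void PrintGraph(TF_Graph* graph) {
    size_t pos = 0;
    TF_Operation* oper;
    while ((oper = TF_GraphNextOperation(graph, &pos)) != NULL) {
        printf("%s (%s): %d input(s), %d output(s)\n",
               TF_OperationName(oper), TF_OperationOpType(oper),
               TF_OperationNumInputs(oper), TF_OperationNumOutputs(oper));
        for (int i = 0; i < TF_OperationNumInputs(oper); i++) {
            TF_Input in;
            in.oper = oper;
            in.index = i;
            TF_Output producer = TF_OperationInput(in);
            printf("  input %d <- %s:%d\n", i,
                   TF_OperationName(producer.oper), producer.index);
        }
    }
}

Note there is no single distinguished root: ops with no inputs (the two Const nodes here) are the sources, and the outputs array filled in by BuildSuccessGraph identifies the sink.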

DRI3 and Present extensions in a client application

I am looking for someone who has experience/knowledge of how to implement the DRI3 and Present extensions in a client application.
My use case:
I have a DMA buffer generated by my application and want to use the Present extension to render it to the screen.
Thanks Sharvil111 and Grey for the prompt responses.
I found a Present test in the xf86-video-intel driver sources (test/present-test.c) and ported the code below to my app. I am getting a black screen at the end. I would expect the pixmap created to be empty, and therefore a black screen is expected.
Correct me if I am wrong.
static int test_dri3(Display *dpy)
{
    Window win = DefaultRootWindow(dpy);
    Pixmap pixmap;
    Window root;
    unsigned int width, height;
    unsigned border, depth;
    unsigned stride, size;
    int x, y, ret = 1;
    int device, handle;
    int bpp;

    device = dri3_open(dpy);
    if (device < 0)
        return 0;
    if (!is_intel(device))
        return 0;
    printf("Opened Intel DRI3 device\n");

    XGetGeometry(dpy, win, &root, &x, &y,
                 &width, &height, &border, &depth);
    switch (depth) {
    case 8: bpp = 8; break;
    case 15: case 16: bpp = 16; break;
    case 24: case 32: bpp = 32; break;
    default: return 0;
    }
    stride = width * bpp / 8;
    size = PAGE_ALIGN(stride * height);
    printf("Creating DRI3 %dx%d (source stride=%d, size=%d) for GTT\n",
           width, height, stride, size);

    pixmap = 0;
    handle = gem_create(device, size);
    if (handle) {
        pixmap = dri3_create_pixmap(dpy, root,
                                    width, height, depth,
                                    gem_export(device, handle), bpp, stride, size);
        gem_close(device, handle);
    }
    if (pixmap == 0)
        goto fail;

    xcb_present_pixmap(XGetXCBConnection(dpy),
                       win, pixmap,
                       0, /* sbc */
                       0, /* valid */
                       0, /* update */
                       0, /* x_off */
                       0, /* y_off */
                       None,
                       None, /* wait fence */
                       None,
                       XCB_PRESENT_OPTION_NONE,
                       0, /* target msc */
                       0, /* divisor */
                       0, /* remainder */
                       0, NULL);
    XFreePixmap(dpy, pixmap);
    XSync(dpy, True);
    if (_x_error_occurred)
        goto fail;

fail:
    close(device);
    return ret;
}
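To rule out the present request silently failing, present-test.c-style code usually also selects for Present events on the window and waits for the completion notification. A rough sketch using the XCB Present API (a minimal sketch, assuming the dpy and win variables from the code above; xcb/xcb.h and xcb/present.h provide the declarations):

/* Sketch: wait for the PresentCompleteNotify after xcb_present_pixmap(). */
xcb_connection_t *c = XGetXCBConnection(dpy);
uint32_t eid = xcb_generate_id(c);
xcb_present_select_input(c, eid, win, XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY);
xcb_special_event_t *q = xcb_register_for_special_xge(c, &xcb_present_id, eid, NULL);

/* ... xcb_present_pixmap(...) as above ... */
xcb_flush(c);

xcb_generic_event_t *ev = xcb_wait_for_special_event(c, q);
if (ev) {
    xcb_present_generic_event_t *ge = (xcb_present_generic_event_t *)ev;
    if (ge->evtype == XCB_PRESENT_COMPLETE_NOTIFY)
        printf("present completed\n");
    free(ev);
}
xcb_unregister_for_special_event(c, q);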