I am trying to use PCL with a V1 Microsoft Kinect camera to do 3D mapping. I have installed PCL and used CMake to create the project for Visual Studio 2019. When I try to run the file "openni_grabber.cpp" I get the exception shown below, and I cannot figure out what is causing it or how to fix it. I'm not very experienced with PCL, so any help would be very appreciated.
I have tried searching and cannot find anyone else who has had this error, or any information on how to fix it.
The code I'm using is shown below:
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/openni2_grabber.h>
#include <pcl/common/time.h>
#include <iostream>

class SimpleOpenNIProcessor
{
public:
  void cloud_cb_ (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &cloud)
  {
    static unsigned count = 0;
    static double last = pcl::getTime ();
    if (++count == 30)
    {
      double now = pcl::getTime ();
      std::cout << "distance of center pixel :" << cloud->points [(cloud->width >> 1) * (cloud->height + 1)].z
                << " mm. Average framerate: " << double(count)/double(now - last) << " Hz" << std::endl;
      count = 0;
      last = now;
    }
  }

  void run ()
  {
    // create a new grabber for OpenNI devices
    pcl::Grabber* interface = new pcl::io::OpenNI2Grabber();

    // make callback function from member function
    boost::function<void (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr&)> f =
      boost::bind (&SimpleOpenNIProcessor::cloud_cb_, this, _1);

    // connect callback function for desired signal. In this case its a point cloud with color values
    boost::signals2::connection c = interface->registerCallback (f);

    // start receiving point clouds
    interface->start ();

    // wait until user quits program with Ctrl-C, but no busy-waiting -> sleep (1);
    while (true)
      boost::this_thread::sleep (boost::posix_time::seconds (1));

    // stop the grabber
    interface->stop ();
  }
};

int main ()
{
  SimpleOpenNIProcessor v;
  v.run ();
  return (0);
}
The details of the exception are:
"Unhandled exception at 0x00007FFBEB9206BC in openni_grabber.exe: Microsoft C++ exception: pcl::IOException at memory location 0x00000043E9EFF378."
Context
I have built a webserver using the Coroutines TS, Boost.Asio, and Boost.Beast.
There is a coroutine for reading and one for writing.
There is a message_to_send queue onto which messages to be sent to the user are pushed.
The writing coroutine checks whether there is something in the message_to_send queue and sends it.
After sending, the writing coroutine suspends itself for 100 milliseconds and then checks again for something to write.
Problem
The writing coroutine is polling the message queue every 100 milliseconds. I would like to find a solution that does not poll after some timer has fired.
Possible solution
Maybe there is a way to co_await the change of a variable. Maybe by creating an async_wait_for_callback with "async_initiate"?
Code example
You can clone the project, or use the complete example code posted here:
#include <algorithm>
#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/system_timer.hpp>
#include <boost/beast.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/bind/bind.hpp>
#include <boost/optional.hpp>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <exception>
#include <iostream>
#include <list>
#include <memory>
#include <set>
#include <stdexcept>
#include <string>
// TODO use cmake to find out if the compiler is gcc or clang
#include <coroutine> // enable if building with gcc
// #include <experimental/coroutine> // enable if building with clang

using namespace boost::beast;
using namespace boost::asio;
using boost::asio::ip::tcp;

typedef boost::asio::use_awaitable_t<>::as_default_on_t<boost::asio::basic_waitable_timer<boost::asio::chrono::system_clock>> CoroTimer;
typedef boost::beast::websocket::stream<boost::beast::tcp_stream> Websocket;
using tcp_acceptor = use_awaitable_t<>::as_default_on_t<tcp::acceptor>;

struct User
{
  boost::asio::awaitable<void> writeToClient (std::weak_ptr<Websocket> &connection);

  std::deque<std::string> msgQueue{};
  std::shared_ptr<CoroTimer> timer{};
};

void
handleMessage (std::string const &msg, std::list<std::shared_ptr<User>> &users, std::shared_ptr<User> user)
{
  std::cout << "please implement handle message" << std::endl;
  user->msgQueue.push_back ("please implement handle message");
  user->timer->cancel ();
}

boost::asio::awaitable<void>
User::writeToClient (std::weak_ptr<Websocket> &connection)
{
  try
  {
    while (not connection.expired ())
    {
      timer = std::make_shared<CoroTimer> (CoroTimer{ co_await this_coro::executor });
      timer->expires_after (std::chrono::system_clock::time_point::max () - std::chrono::system_clock::now ());
      try
      {
        co_await timer->async_wait ();
      }
      catch (boost::system::system_error &e)
      {
        using namespace boost::system::errc;
        if (operation_canceled == e.code ())
        {
          // swallow cancel
        }
        else
        {
          std::cout << "error in timer boost::system::errc: " << e.code () << std::endl;
          abort ();
        }
      }
      while (not msgQueue.empty () && not connection.expired ())
      {
        auto tmpMsg = std::move (msgQueue.front ());
        std::cout << " msg: " << tmpMsg << std::endl;
        msgQueue.pop_front ();
        co_await connection.lock ()->async_write (buffer (tmpMsg), use_awaitable);
      }
    }
  }
  catch (std::exception &e)
  {
    std::cout << "write Exception: " << e.what () << std::endl;
  }
}

class Server
{
public:
  Server (boost::asio::ip::tcp::endpoint const &endpoint);
  boost::asio::awaitable<void> listener ();

private:
  void removeUser (std::list<std::shared_ptr<User>>::iterator user);
  boost::asio::awaitable<std::string> my_read (Websocket &ws_);
  boost::asio::awaitable<void> readFromClient (std::list<std::shared_ptr<User>>::iterator user, Websocket &connection);

  boost::asio::ip::tcp::endpoint _endpoint{};
  std::list<std::shared_ptr<User>> users{};
};

namespace this_coro = boost::asio::this_coro;

Server::Server (boost::asio::ip::tcp::endpoint const &endpoint) : _endpoint{ endpoint } {}

awaitable<std::string>
Server::my_read (Websocket &ws_)
{
  std::cout << "read" << std::endl;
  flat_buffer buffer;
  co_await ws_.async_read (buffer, use_awaitable);
  auto msg = buffers_to_string (buffer.data ());
  std::cout << "number of letters '" << msg.size () << "' msg: '" << msg << "'" << std::endl;
  co_return msg;
}

awaitable<void>
Server::readFromClient (std::list<std::shared_ptr<User>>::iterator user, Websocket &connection)
{
  try
  {
    for (;;)
    {
      auto readResult = co_await my_read (connection);
      handleMessage (readResult, users, *user);
    }
  }
  catch (std::exception &e)
  {
    removeUser (user);
    std::cout << "read Exception: " << e.what () << std::endl;
  }
}

void
Server::removeUser (std::list<std::shared_ptr<User>>::iterator user)
{
  users.erase (user);
}

awaitable<void>
Server::listener ()
{
  auto executor = co_await this_coro::executor;
  tcp_acceptor acceptor (executor, _endpoint);
  for (;;)
  {
    try
    {
      auto socket = co_await acceptor.async_accept ();
      auto connection = std::make_shared<Websocket> (std::move (socket));
      users.emplace_back (std::make_shared<User> ());
      std::list<std::shared_ptr<User>>::iterator user = std::next (users.end (), -1);
      connection->set_option (websocket::stream_base::timeout::suggested (role_type::server));
      connection->set_option (websocket::stream_base::decorator ([] (websocket::response_type &res) { res.set (http::field::server, std::string (BOOST_BEAST_VERSION_STRING) + " websocket-server-async"); }));
      co_await connection->async_accept (use_awaitable);
      co_spawn (
          executor, [connection, this, &user] () mutable { return readFromClient (user, *connection); }, detached);
      co_spawn (
          executor, [connectionWeakPointer = std::weak_ptr<Websocket>{ connection }, &user] () mutable { return user->get ()->writeToClient (connectionWeakPointer); }, detached);
    }
    catch (std::exception &e)
    {
      std::cout << "Server::listener () connect Exception : " << e.what () << std::endl;
    }
  }
}

auto const DEFAULT_PORT = std::uint16_t{ 55555 };

int
main ()
{
  try
  {
    using namespace boost::asio;
    io_context io_context (1);
    signal_set signals (io_context, SIGINT, SIGTERM);
    signals.async_wait ([&] (auto, auto) { io_context.stop (); });
    auto server = Server{ { ip::tcp::v4 (), DEFAULT_PORT } };
    co_spawn (
        io_context, [&server] { return server.listener (); }, detached);
    io_context.run ();
  }
  catch (std::exception &e)
  {
    std::printf ("Exception: %s\n", e.what ());
  }
  return 0;
}
EDIT: updated the code based on sehe's idea, which is marked as the answer.
The classical threading solution would be a condition variable. Of course, that's not what you want - I see you even explicitly disabled ASIO threading. Good.
One way - short of providing an Asio service to implement this behaviour - would be to use timers to emulate condition variables. You could use a timer that "never" expires (deadline at timepoint::max()) and manually reset it to timepoint::min() (canceling any pending async_wait) or to any time in the past to signify the condition. Then you can use Timer::async_wait with use_awaitable like you already know how.
Note that you still need to "manually" signal the change. This is what you want, because anything else would require kernel process-tracing support/hardware debugger facilities, which require massive privileges and tend to be very slow.
You might want to know about associating use_awaitable as the default completion token for the executor bound to your timer. See e.g. the examples: https://www.boost.org/doc/libs/1_78_0/doc/html/boost_asio/example/cpp17/coroutines_ts/echo_server_with_default.cpp (the HTML docs do NOT link these examples)
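To make that concrete, here is a minimal sketch of the pattern (AwaitableCondVar is my own name, not an Asio type): a system_timer parked at time_point::max() stands in for the condition variable, notify() cancels the pending wait, and the waiting coroutine treats the cancellation as "condition signalled". It assumes the single-threaded io_context from the question, so no extra synchronization is needed.

#include <boost/asio.hpp>
#include <chrono>
#include <deque>
#include <string>

namespace asio = boost::asio;

// Hypothetical helper: a timer used as a condition variable.
struct AwaitableCondVar
{
  asio::system_timer timer;

  explicit AwaitableCondVar (asio::any_io_executor ex)
      : timer (ex, std::chrono::system_clock::time_point::max ()) {} // "never" expires

  // Signal the condition: cancel the pending async_wait.
  void notify () { timer.cancel (); }

  asio::awaitable<void> wait ()
  {
    boost::system::error_code ec;
    // redirect_error swallows the exception; operation_aborted means "notified".
    co_await timer.async_wait (asio::redirect_error (asio::use_awaitable, ec));
  }
};

// Writer side: sleep until notified instead of polling every 100 ms.
asio::awaitable<void> writer (AwaitableCondVar &cv, std::deque<std::string> &queue)
{
  for (;;)
  {
    co_await cv.wait ();
    while (!queue.empty ())
    {
      // ... async_write queue.front () to the websocket here ...
      queue.pop_front ();
    }
  }
}

handleMessage would then call cv.notify() after pushing onto the queue, replacing the timer->cancel() in the updated code above.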
I have some generated SPIR-V code which I want to use with the Vulkan API, but I get an
Exception thrown at 0x00007FFB68D933CB (nvoglv64.dll) in vulkanCompute.exe: 0xC0000005: Access violation reading location 0x0000000000000008.
when trying to create the pipeline with vkCreateComputePipelines.
The API calls should be fine, because the same code works with a shader compiled with glslangValidator. Therefore I assume that the generated SPIR-V code must be ill-formed somehow.
I've checked the SPIR-V code with the validator tool from Khronos, using spirv-val --target-env vulkan1.1 mainV.spv, which exited without error. However, it is also known that this tool is still incomplete.
I've also tried to compile my SPIR-V code with the Radeon GPU Analyzer, which is also available online at the shader playground, and this tool throws the error Error: Error: internal error: Bil::BilInstructionConvert::Create(60) Code Not Tested!, which is not really helpful but supports the assumption that the code is malformed.
The SPIR-V code is unfortunately too long to post here, but it is in the shader playground link.
Does anyone know what the problem is with my setup, or have any idea how I can verify my SPIR-V code in a better way, without checking all 700 lines of code manually?
I don't think the problem is there, but anyway here is the C++ host code:
#include "vulkan/vulkan.hpp"
#include <iostream>
#include <fstream>
#include <vector>
#define BAIL_ON_BAD_RESULT(result) \
if (VK_SUCCESS != (result)) \
{ \
fprintf(stderr, "Failure at %u %s\n", __LINE__, __FILE__); \
exit(-1); \
}
VkResult vkGetBestComputeQueueNPH(vk::PhysicalDevice &physicalDevice, uint32_t &queueFamilyIndex)
{
auto properties = physicalDevice.getQueueFamilyProperties();
int i = 0;
for (auto prop : properties)
{
vk::QueueFlags maskedFlags = (~(vk::QueueFlagBits::eTransfer | vk::QueueFlagBits::eSparseBinding) & prop.queueFlags);
if (!(vk::QueueFlagBits::eGraphics & maskedFlags) && (vk::QueueFlagBits::eCompute & maskedFlags))
{
queueFamilyIndex = i;
return VK_SUCCESS;
}
i++;
}
i = 0;
for (auto prop : properties)
{
vk::QueueFlags maskedFlags = (~(vk::QueueFlagBits::eTransfer | vk::QueueFlagBits::eSparseBinding) & prop.queueFlags);
if (vk::QueueFlagBits::eCompute & maskedFlags)
{
queueFamilyIndex = i;
return VK_SUCCESS;
}
i++;
}
return VK_ERROR_INITIALIZATION_FAILED;
}
int main(int argc, const char *const argv[])
{
(void)argc;
(void)argv;
try
{
// initialize the vk::ApplicationInfo structure
vk::ApplicationInfo applicationInfo("VecAdd", 1, "Vulkan.hpp", 1, VK_API_VERSION_1_1);
// initialize the vk::InstanceCreateInfo
std::vector<char *> layers = {
"VK_LAYER_LUNARG_api_dump",
"VK_LAYER_KHRONOS_validation"
};
vk::InstanceCreateInfo instanceCreateInfo({}, &applicationInfo, static_cast<uint32_t>(layers.size()), layers.data());
// create a UniqueInstance
vk::UniqueInstance instance = vk::createInstanceUnique(instanceCreateInfo);
auto physicalDevices = instance->enumeratePhysicalDevices();
for (auto &physicalDevice : physicalDevices)
{
auto props = physicalDevice.getProperties();
// get the QueueFamilyProperties of the first PhysicalDevice
std::vector<vk::QueueFamilyProperties> queueFamilyProperties = physicalDevice.getQueueFamilyProperties();
uint32_t computeQueueFamilyIndex = 0;
// get the best index into queueFamiliyProperties which supports compute and stuff
BAIL_ON_BAD_RESULT(vkGetBestComputeQueueNPH(physicalDevice, computeQueueFamilyIndex));
std::vector<char *>extensions = {"VK_EXT_external_memory_host", "VK_KHR_shader_float16_int8"};
// create a UniqueDevice
float queuePriority = 0.0f;
vk::DeviceQueueCreateInfo deviceQueueCreateInfo(vk::DeviceQueueCreateFlags(), static_cast<uint32_t>(computeQueueFamilyIndex), 1, &queuePriority);
vk::StructureChain<vk::DeviceCreateInfo, vk::PhysicalDeviceFeatures2, vk::PhysicalDeviceShaderFloat16Int8Features> createDeviceInfo = {
vk::DeviceCreateInfo(vk::DeviceCreateFlags(), 1, &deviceQueueCreateInfo, 0, nullptr, static_cast<uint32_t>(extensions.size()), extensions.data()),
vk::PhysicalDeviceFeatures2(),
vk::PhysicalDeviceShaderFloat16Int8Features()
};
createDeviceInfo.get<vk::PhysicalDeviceFeatures2>().features.setShaderInt64(true);
createDeviceInfo.get<vk::PhysicalDeviceShaderFloat16Int8Features>().setShaderInt8(true);
vk::UniqueDevice device = physicalDevice.createDeviceUnique(createDeviceInfo.get<vk::DeviceCreateInfo>());
auto memoryProperties2 = physicalDevice.getMemoryProperties2();
vk::PhysicalDeviceMemoryProperties const &memoryProperties = memoryProperties2.memoryProperties;
const int32_t bufferLength = 16384;
const uint32_t bufferSize = sizeof(int32_t) * bufferLength;
// we are going to need two buffers from this one memory
const vk::DeviceSize memorySize = bufferSize * 3;
// set memoryTypeIndex to an invalid entry in the properties.memoryTypes array
uint32_t memoryTypeIndex = VK_MAX_MEMORY_TYPES;
for (uint32_t k = 0; k < memoryProperties.memoryTypeCount; k++)
{
if ((vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent) & memoryProperties.memoryTypes[k].propertyFlags &&
(memorySize < memoryProperties.memoryHeaps[memoryProperties.memoryTypes[k].heapIndex].size))
{
memoryTypeIndex = k;
std::cout << "found memory " << memoryTypeIndex + 1 << " out of " << memoryProperties.memoryTypeCount << std::endl;
break;
}
}
BAIL_ON_BAD_RESULT(memoryTypeIndex == VK_MAX_MEMORY_TYPES ? VK_ERROR_OUT_OF_HOST_MEMORY : VK_SUCCESS);
auto memory = device->allocateMemoryUnique(vk::MemoryAllocateInfo(memorySize, memoryTypeIndex));
auto in_buffer = device->createBufferUnique(vk::BufferCreateInfo(vk::BufferCreateFlags(), bufferSize, vk::BufferUsageFlagBits::eStorageBuffer, vk::SharingMode::eExclusive));
device->bindBufferMemory(in_buffer.get(), memory.get(), 0);
// create a DescriptorSetLayout
std::vector<vk::DescriptorSetLayoutBinding> descriptorSetLayoutBinding{
{0, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute}};
vk::UniqueDescriptorSetLayout descriptorSetLayout = device->createDescriptorSetLayoutUnique(vk::DescriptorSetLayoutCreateInfo(vk::DescriptorSetLayoutCreateFlags(), static_cast<uint32_t>(descriptorSetLayoutBinding.size()), descriptorSetLayoutBinding.data()));
std::cout << "Memory bound" << std::endl;
std::ifstream myfile;
myfile.open("shaders/MainV.spv", std::ios::ate | std::ios::binary);
if (!myfile.is_open())
{
std::cout << "File not found" << std::endl;
return EXIT_FAILURE;
}
auto size = myfile.tellg();
std::vector<unsigned int> shader_spv(size / sizeof(unsigned int));
myfile.seekg(0);
myfile.read(reinterpret_cast<char *>(shader_spv.data()), size);
myfile.close();
std::cout << "Shader size: " << shader_spv.size() << std::endl;
auto shaderModule = device->createShaderModuleUnique(vk::ShaderModuleCreateInfo(vk::ShaderModuleCreateFlags(), shader_spv.size() * sizeof(unsigned int), shader_spv.data()));
// create a PipelineLayout using that DescriptorSetLayout
vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique(vk::PipelineLayoutCreateInfo(vk::PipelineLayoutCreateFlags(), 1, &descriptorSetLayout.get()));
vk::ComputePipelineCreateInfo computePipelineInfo(
vk::PipelineCreateFlags(),
vk::PipelineShaderStageCreateInfo(
vk::PipelineShaderStageCreateFlags(),
vk::ShaderStageFlagBits::eCompute,
shaderModule.get(),
"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE6VecAdd"),
pipelineLayout.get());
auto pipeline = device->createComputePipelineUnique(nullptr, computePipelineInfo);
auto descriptorPoolSize = vk::DescriptorPoolSize(vk::DescriptorType::eStorageBuffer, 2);
auto descriptorPool = device->createDescriptorPool(vk::DescriptorPoolCreateInfo(vk::DescriptorPoolCreateFlags(), 1, 1, &descriptorPoolSize));
auto commandPool = device->createCommandPoolUnique(vk::CommandPoolCreateInfo(vk::CommandPoolCreateFlags(), computeQueueFamilyIndex));
auto commandBuffer = std::move(device->allocateCommandBuffersUnique(vk::CommandBufferAllocateInfo(commandPool.get(), vk::CommandBufferLevel::ePrimary, 1)).front());
commandBuffer->begin(vk::CommandBufferBeginInfo(vk::CommandBufferUsageFlags(vk::CommandBufferUsageFlagBits::eOneTimeSubmit)));
commandBuffer->bindPipeline(vk::PipelineBindPoint::eCompute, pipeline.get());
commandBuffer->dispatch(bufferSize / sizeof(int32_t), 1, 1);
commandBuffer->end();
auto queue = device->getQueue(computeQueueFamilyIndex, 0);
vk::SubmitInfo submitInfo(0, nullptr, nullptr, 1, &commandBuffer.get(), 0, nullptr);
queue.submit(1, &submitInfo, vk::Fence());
queue.waitIdle();
printf("all done\nWoohooo!!!\n\n");
}
}
catch (vk::SystemError &err)
{
std::cout << "vk::SystemError: " << err.what() << std::endl;
exit(-1);
}
catch (std::runtime_error &err)
{
std::cout << "std::runtime_error: " << err.what() << std::endl;
exit(-1);
}
catch (...)
{
std::cout << "unknown error\n";
exit(-1);
}
return EXIT_SUCCESS;
}
Well, after checking line by line, it turned out that the problem occurs when working with pointers to pointers. For me it is still not clear from the specification that this is not allowed, but it is understandable that it does not work with logical pointers.
Still, the behaviour is strange: the validator is not able to catch this, and compiling the SPIR-V code crashes instead of producing a clear error message.
So in the end, it was the shader code that was wrong.
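For illustration, here is a hedged sketch of the kind of construct involved (my own reconstruction, not the actual shader source): under SPIR-V's Logical addressing model, pointers are not first-class values that can be stored and loaded as data, so device code that indirects through a pointer to a pointer cannot be expressed in well-formed SPIR-V.

// Fine under the Logical addressing model: indexing through a single pointer.
void add (const int *a, const int *b, int *out, int i)
{
  out[i] = a[i] + b[i];
}

// Problematic: 'rows' is a pointer to a pointer. Materialising the inner
// pointer as a loaded value is exactly what Logical addressing forbids,
// so a compiler emitting this pattern produces malformed SPIR-V.
void addIndirect (int **rows, int i, int j)
{
  rows[i][j] += 1;
}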
I am trying to use the example code from here.
I have made some changes in order to save the images to the computer. When I read the data in MATLAB, it seems like values that should be 0 are set to 2047, and overall the result does not seem correct when I reconstruct the 3D points using the default intrinsic camera parameters.
What I want to achieve is to save the images so that I can use
img = single(imread('depth.png')) / 1000;
and have the depth values in meters, with pixels that have no measurement set to zero.
It is the Kinect V1, by the way.
Here is the code, with comments where I have tried to make changes.
#include "libfreenect.hpp"
#include <iostream>
#include <vector>
#include <cmath>
#include <pthread.h>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
using namespace cv;
using namespace std;
class myMutex {
public:
myMutex() {
pthread_mutex_init( &m_mutex, NULL );
}
void lock() {
pthread_mutex_lock( &m_mutex );
}
void unlock() {
pthread_mutex_unlock( &m_mutex );
}
private:
pthread_mutex_t m_mutex;
};
// Should one use FREENECT_DEPTH_REGISTERED instead of FREENECT_DEPTH_11BIT?
class MyFreenectDevice : public Freenect::FreenectDevice {
public:
MyFreenectDevice(freenect_context *_ctx, int _index)
: Freenect::FreenectDevice(_ctx, _index), m_buffer_depth(FREENECT_DEPTH_11BIT),
m_buffer_rgb(FREENECT_VIDEO_RGB), m_gamma(2048), m_new_rgb_frame(false),
m_new_depth_frame(false), depthMat(Size(640,480),CV_16UC1),
rgbMat(Size(640,480), CV_8UC3, Scalar(0)),
ownMat(Size(640,480),CV_8UC3,Scalar(0)) {
for( unsigned int i = 0 ; i < 2048 ; i++) {
float v = i/2048.0;
v = std::pow(v, 3)* 6;
m_gamma[i] = v*6*256;
}
}
// Do not call directly even in child
void VideoCallback(void* _rgb, uint32_t timestamp) {
std::cout << "RGB callback" << std::endl;
m_rgb_mutex.lock();
uint8_t* rgb = static_cast<uint8_t*>(_rgb);
rgbMat.data = rgb;
m_new_rgb_frame = true;
m_rgb_mutex.unlock();
};
// Do not call directly even in child
void DepthCallback(void* _depth, uint32_t timestamp) {
std::cout << "Depth callback" << std::endl;
m_depth_mutex.lock();
uint16_t* depth = static_cast<uint16_t*>(_depth);
// Here I use memcpy instead so I can use uint16
// memcpy(depthMat.data,depth,depthMat.rows*depthMat.cols*sizeof(uint16_t));
depthMat.data = (uchar*) depth;
m_new_depth_frame = true;
m_depth_mutex.unlock();
}
bool getVideo(Mat& output) {
m_rgb_mutex.lock();
if(m_new_rgb_frame) {
cv::cvtColor(rgbMat, output, CV_RGB2BGR);
m_new_rgb_frame = false;
m_rgb_mutex.unlock();
return true;
} else {
m_rgb_mutex.unlock();
return false;
}
}
bool getDepth(Mat& output) {
m_depth_mutex.lock();
if(m_new_depth_frame) {
depthMat.copyTo(output);
m_new_depth_frame = false;
m_depth_mutex.unlock();
return true;
} else {
m_depth_mutex.unlock();
return false;
}
}
private:
// Should it be uint16_t instead or even higher?
std::vector<uint8_t> m_buffer_depth;
std::vector<uint8_t> m_buffer_rgb;
std::vector<uint16_t> m_gamma;
Mat depthMat;
Mat rgbMat;
Mat ownMat;
myMutex m_rgb_mutex;
myMutex m_depth_mutex;
bool m_new_rgb_frame;
bool m_new_depth_frame;
};
int main(int argc, char **argv) {
bool die(false);
string filename("snapshot");
string suffix(".png");
int i_snap(0),iter(0);
Mat depthMat(Size(640,480),CV_16UC1);
Mat depthf (Size(640,480),CV_8UC1);
Mat rgbMat(Size(640,480),CV_8UC3,Scalar(0));
Mat ownMat(Size(640,480),CV_8UC3,Scalar(0));
// The next two lines must be changed as Freenect::Freenect
// isn't a template but the method createDevice:
// Freenect::Freenect<MyFreenectDevice> freenect;
// MyFreenectDevice& device = freenect.createDevice(0);
// by these two lines:
Freenect::Freenect freenect;
MyFreenectDevice& device = freenect.createDevice<MyFreenectDevice>(0);
namedWindow("rgb",CV_WINDOW_AUTOSIZE);
namedWindow("depth",CV_WINDOW_AUTOSIZE);
device.startVideo();
device.startDepth();
while (!die) {
device.getVideo(rgbMat);
device.getDepth(depthMat);
// Here I save the depth images
std::ostringstream file;
file << filename << i_snap << suffix;
cv::imwrite(file.str(),depthMat);
cv::imshow("rgb", rgbMat);
depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
cv::imshow("depth",depthf);
if(iter >= 1000) break;
iter++;
}
device.stopVideo();
device.stopDepth();
return 0;
}
Thanks in advance!
Erik
I don't have any experience with OpenKinect in particular, but shouldn't your depth buffer be uint16_t?
std::vector<uint8_t> m_buffer_depth;
Also, for MATLAB, check whether the image you are reading comes in as uint16 or uint8. If it's the latter, convert it to uint16:
uint16(imread('depth.png'));
Sorry I couldn't help more. Hope this helps.
The values you have are the raw depth values. You need to remap those into millimetres for the numbers to make sense. Kinect 1 can see up to 10 meters, so I would go with raw_values/2047*10000.
If the values are saturated at 2047, you are probably using the FREENECT_DEPTH_11BIT_PACKED depth format.
For work in Matlab, it is always easier to use FREENECT_DEPTH_MM or FREENECT_DEPTH_REGISTERED.
Enjoy.
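Applying that suggestion to the example above, here is a hedged sketch (assuming the libfreenect C++ wrapper's setDepthFormat() and that FREENECT_DEPTH_REGISTERED delivers 16-bit depth in millimetres registered to the RGB image; MmFreenectDevice and the file name are my own). A CV_16UC1 Mat written with cv::imwrite becomes a 16-bit PNG, so single(imread('depth.png'))/1000 in MATLAB yields meters, with no-measurement pixels staying 0.

#include "libfreenect.hpp"
#include <opencv2/opencv.hpp>
#include <cstdint>
#include <cstring>

class MmFreenectDevice : public Freenect::FreenectDevice {
public:
    MmFreenectDevice(freenect_context *ctx, int index)
        : Freenect::FreenectDevice(ctx, index),
          depthMat(cv::Size(640, 480), CV_16UC1) {
        // Depth now arrives as uint16_t millimetres; 0 means "no measurement".
        setDepthFormat(FREENECT_DEPTH_REGISTERED);
    }

    void DepthCallback(void *data, uint32_t /*timestamp*/) override {
        // Copy the full 16-bit frame instead of aliasing the driver's buffer.
        std::memcpy(depthMat.data, data, depthMat.total() * sizeof(uint16_t));
        // For illustration only: writing every frame to the same file.
        cv::imwrite("depth.png", depthMat);
    }

    // The wrapper declares this pure virtual, so provide a no-op override.
    void VideoCallback(void * /*rgb*/, uint32_t /*timestamp*/) override {}

private:
    cv::Mat depthMat;
};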
When I try to poll a keydown event in SDL 2.0 and hold down a key, I get multiple keydown events, one after the other. When I run the same program using SDL 1.2.15 (with minor changes, as SDL 1.2.15 does not support SDL_Window), I do not have this issue: the keydown event occurs only once, as it is supposed to. I even tried executing the program on a different computer to make sure that it was not a computer-specific issue.
The relevant code is as follows:
#include <iostream>
#include <SDL.h>
using namespace std;

SDL_Event event;
SDL_Window* screen = NULL;

int main(int argc, char* args[])
{
    if(SDL_Init(SDL_INIT_EVERYTHING) == -1)
    {
        cout << "ERROR INIT";
        return 0;
    }
    screen = SDL_CreateWindow("My Game Window",
                              SDL_WINDOWPOS_UNDEFINED,
                              SDL_WINDOWPOS_UNDEFINED,
                              640, 480,
                              SDL_WINDOW_OPENGL);
    bool quit = false;
    while(!quit)
    {
        while(SDL_PollEvent(&event))
        {
            if(event.type == SDL_KEYDOWN)
            {
                cout << "KEY PRESSED!" << endl;
            }
        }
    }
    return 0;
}
Ignore SDL_KEYDOWN events where event.key.repeat != 0.
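In SDL 2.0, held keys generate auto-repeat events that are flagged rather than suppressed: SDL_KeyboardEvent carries a repeat field that is non-zero for repeats. Applied to the poll loop above:

if(event.type == SDL_KEYDOWN)
{
    // event.key.repeat is non-zero for auto-repeated keydowns (SDL 2.0),
    // so this fires exactly once per physical key press, as in SDL 1.2.
    if(event.key.repeat == 0)
    {
        cout << "KEY PRESSED!" << endl;
    }
}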
Using Point Cloud Library on Ubuntu, I am trying to take multiple point clouds from the Kinect and store them in memory for later use in the program. My code shown at the bottom of this post is designed to store the first Point Cloud from the Kinect and output its width and height. The program gives me a runtime error:
/usr/include/boost/smart_ptr/shared_ptr.hpp:418: T* boost::shared_ptr<T>::operator->() const [with T = pcl::PointCloud<pcl::PointXYZ>]: Assertion `px != 0' failed.
All help is greatly appreciated and I always accept an answer!
The code:
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>

class SimpleOpenNIViewer
{
public:
    SimpleOpenNIViewer () : viewer ("PCL OpenNI Viewer") {}

    pcl::PointCloud<pcl::PointXYZ>::Ptr prevCloud;

    void cloud_cb_ (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud)
    {
        if (!viewer.wasStopped())
            viewer.showCloud (cloud);

        //ICP start
        if(!prevCloud) {
            pcl::PointCloud<pcl::PointXYZ>::Ptr prevCloud( new pcl::PointCloud<pcl::PointXYZ>());
            pcl::copyPointCloud<pcl::PointXYZ, pcl::PointXYZ>(*cloud, *prevCloud);
        }
        cout << prevCloud->width << " by " << prevCloud->height << endl;
    }

    void run ()
    {
        pcl::Grabber* interface = new pcl::OpenNIGrabber();
        boost::function<void (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr&)> f =
            boost::bind (&SimpleOpenNIViewer::cloud_cb_, this, _1);
        interface->registerCallback (f);
        interface->start ();
        while (!viewer.wasStopped())
        {
            boost::this_thread::sleep (boost::posix_time::seconds (1));
        }
        interface->stop ();
    }

    pcl::visualization::CloudViewer viewer;
};

int main ()
{
    SimpleOpenNIViewer v;
    v.run ();
    return 0;
}
Try this (I don't have the Kinect drivers installed, so I can't test). Basically, in my version prevCloud is initialized in the constructor, and once the first frame has assigned it, (!prevCloud) will equal false from then on, which is to say prevCloud.get() != NULL.
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>

class SimpleOpenNIViewer
{
    typedef pcl::PointXYZ Point;
    typedef pcl::PointCloud<Point> PointCloud;

public:
    SimpleOpenNIViewer () : viewer ("PCL OpenNI Viewer") {
        prevCloud = PointCloud::Ptr(NULL);
    }

    void cloud_cb_ (const PointCloud::ConstPtr &cloud)
    {
        if (!viewer.wasStopped())
            viewer.showCloud (cloud);

        if (!prevCloud) // init previous cloud if first frame
            prevCloud = PointCloud::Ptr(new PointCloud);
        //else          // else run ICP between cloud and prevCloud
        //    RunICP(cloud, prevCloud);

        // Copy the new frame into prevCloud
        pcl::copyPointCloud<Point, Point>(*cloud, *prevCloud);
        cout << prevCloud->width << " by " << prevCloud->height << endl;
    }

    void run ()
    {
        pcl::Grabber* interface = new pcl::OpenNIGrabber();
        boost::function<void (const PointCloud::ConstPtr&)> f =
            boost::bind (&SimpleOpenNIViewer::cloud_cb_, this, _1);
        interface->registerCallback (f);
        interface->start ();
        while (!viewer.wasStopped())
        {
            boost::this_thread::sleep (boost::posix_time::seconds (1));
        }
        interface->stop ();
    }

    PointCloud::Ptr prevCloud;
    pcl::visualization::CloudViewer viewer;
};

int main ()
{
    SimpleOpenNIViewer v;
    v.run ();
    return 0;
}
You're creating a new local variable prevCloud and copying cloud into it, rather than copying into the prevCloud field. So, if the field's value was null before the if block, it is still null after it, and the assertion fires when you try to dereference it.
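In other words, the minimal fix to the original callback is to assign the member instead of declaring a shadowing local (only cloud_cb_ shown):

void cloud_cb_ (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud)
{
    if (!viewer.wasStopped())
        viewer.showCloud (cloud);

    if (!prevCloud) {
        // Assign the member instead of declaring a new local variable.
        prevCloud = pcl::PointCloud<pcl::PointXYZ>::Ptr (new pcl::PointCloud<pcl::PointXYZ> ());
        pcl::copyPointCloud<pcl::PointXYZ, pcl::PointXYZ> (*cloud, *prevCloud);
    }
    cout << prevCloud->width << " by " << prevCloud->height << endl;
}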
Maybe this code can help you; the cloud is saved in a "pcd" file, take a look here.
Another option is to work with the "Kinfu" project from PCL.
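As a sketch of that approach (assuming only the standard pcl::io API; the file-name scheme is my own), saving each incoming cloud to a PCD file from the callback looks like:

#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <sstream>

// Persist each frame to disk instead of keeping it in memory.
void saveCloud (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud, int frameIndex)
{
    std::ostringstream name;
    name << "frame_" << frameIndex << ".pcd";
    // Binary PCD is faster to write and smaller than the ASCII variant.
    pcl::io::savePCDFileBinary (name.str (), *cloud);
}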