How to co_await for a change in a variable using boost coroutine ts? - boost-asio

Context
I am building a webserver using the Boost Coroutine TS, Boost.Asio, and Boost.Beast.
There is one coroutine for reading and one for writing.
There is a message_to_send queue onto which messages to be sent to the user get pushed.
The writing coroutine checks whether there is something in the message_to_send queue and sends it.
After sending, the writing coroutine suspends itself for 100 milliseconds and then checks again for something to write.
Problem
The writing coroutine is polling the message queue every 100 milliseconds. I would like to find a solution that does not poll after some timer has fired.
Possible solution
Maybe there is a way to co_await the change of a variable. Maybe by creating an async_wait_for_callback with "async_initiate"?
Code example
You can clone the project, or use the complete example code posted here:
#include <algorithm>
#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/system_timer.hpp>
#include <boost/beast.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/bind/bind.hpp>
#include <boost/optional.hpp>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <exception>
#include <iostream>
#include <list>
#include <memory>
#include <set>
#include <stdexcept>
#include <string>
// TODO: use CMake to find out whether the compiler is gcc or clang
#include <coroutine> // enable if building with gcc
// #include <experimental/coroutine> // enable if building with clang
using namespace boost::beast;
using namespace boost::asio;
typedef boost::asio::use_awaitable_t<>::as_default_on_t<boost::asio::basic_waitable_timer<boost::asio::chrono::system_clock>> CoroTimer;
typedef boost::beast::websocket::stream<boost::beast::tcp_stream> Websocket;
using boost::asio::ip::tcp;
using tcp_acceptor = use_awaitable_t<>::as_default_on_t<tcp::acceptor>;
struct User
{
boost::asio::awaitable<void> writeToClient (std::weak_ptr<Websocket> &connection);
std::deque<std::string> msgQueue{};
std::shared_ptr<CoroTimer> timer{};
};
void
handleMessage (std::string const &msg, std::list<std::shared_ptr<User>> &users, std::shared_ptr<User> user)
{
std::cout << "please implement handle message" << std::endl;
user->msgQueue.push_back ("please implement handle message");
user->timer->cancel ();
}
boost::asio::awaitable<void>
User::writeToClient (std::weak_ptr<Websocket> &connection)
{
try
{
while (not connection.expired ())
{
timer = std::make_shared<CoroTimer> (CoroTimer{ co_await this_coro::executor });
timer->expires_after (std::chrono::system_clock::time_point::max () - std::chrono::system_clock::now ());
try
{
co_await timer->async_wait ();
}
catch (boost::system::system_error &e)
{
using namespace boost::system::errc;
if (operation_canceled == e.code ())
{
// swallow cancel
}
else
{
std::cout << "error in timer boost::system::errc: " << e.code () << std::endl;
abort ();
}
}
while (not msgQueue.empty () && not connection.expired ())
{
auto tmpMsg = std::move (msgQueue.front ());
std::cout << " msg: " << tmpMsg << std::endl;
msgQueue.pop_front ();
co_await connection.lock ()->async_write (buffer (tmpMsg), use_awaitable);
}
}
}
catch (std::exception &e)
{
std::cout << "write Exception: " << e.what () << std::endl;
}
}
class Server
{
public:
Server (boost::asio::ip::tcp::endpoint const &endpoint);
boost::asio::awaitable<void> listener ();
private:
void removeUser (std::list<std::shared_ptr<User>>::iterator user);
boost::asio::awaitable<std::string> my_read (Websocket &ws_);
boost::asio::awaitable<void> readFromClient (std::list<std::shared_ptr<User>>::iterator user, Websocket &connection);
boost::asio::ip::tcp::endpoint _endpoint{};
std::list<std::shared_ptr<User>> users{};
};
namespace this_coro = boost::asio::this_coro;
Server::Server (boost::asio::ip::tcp::endpoint const &endpoint) : _endpoint{ endpoint } {}
awaitable<std::string>
Server::my_read (Websocket &ws_)
{
std::cout << "read" << std::endl;
flat_buffer buffer;
co_await ws_.async_read (buffer, use_awaitable);
auto msg = buffers_to_string (buffer.data ());
std::cout << "number of letters '" << msg.size () << "' msg: '" << msg << "'" << std::endl;
co_return msg;
}
awaitable<void>
Server::readFromClient (std::list<std::shared_ptr<User>>::iterator user, Websocket &connection)
{
try
{
for (;;)
{
auto readResult = co_await my_read (connection);
handleMessage (readResult, users, *user);
}
}
catch (std::exception &e)
{
removeUser (user);
std::cout << "read Exception: " << e.what () << std::endl;
}
}
void
Server::removeUser (std::list<std::shared_ptr<User>>::iterator user)
{
users.erase (user);
}
awaitable<void>
Server::listener ()
{
auto executor = co_await this_coro::executor;
tcp_acceptor acceptor (executor, _endpoint);
for (;;)
{
try
{
auto socket = co_await acceptor.async_accept ();
auto connection = std::make_shared<Websocket> (std::move (socket));
users.emplace_back (std::make_shared<User> ());
std::list<std::shared_ptr<User>>::iterator user = std::next (users.end (), -1);
connection->set_option (websocket::stream_base::timeout::suggested (role_type::server));
connection->set_option (websocket::stream_base::decorator ([] (websocket::response_type &res) { res.set (http::field::server, std::string (BOOST_BEAST_VERSION_STRING) + " websocket-server-async"); }));
co_await connection->async_accept (use_awaitable);
co_spawn (
executor, [connection, this, &user] () mutable { return readFromClient (user, *connection); }, detached);
co_spawn (
executor, [connectionWeakPointer = std::weak_ptr<Websocket>{ connection }, &user] () mutable { return user->get ()->writeToClient (connectionWeakPointer); }, detached);
}
catch (std::exception &e)
{
std::cout << "Server::listener () connect Exception : " << e.what () << std::endl;
}
}
}
auto const DEFAULT_PORT = std::uint16_t{ 55555 };
int
main ()
{
try
{
using namespace boost::asio;
io_context io_context (1);
signal_set signals (io_context, SIGINT, SIGTERM);
signals.async_wait ([&] (auto, auto) { io_context.stop (); });
auto server = Server{ { ip::tcp::v4 (), DEFAULT_PORT } };
co_spawn (
io_context, [&server] { return server.listener (); }, detached);
io_context.run ();
}
catch (std::exception &e)
{
std::printf ("Exception: %s\n", e.what ());
}
return 0;
}
EDIT: updated the code based on sehe's idea, which is marked as the answer.

The classical threading solution would be a condition variable. Of course, that's not what you want - I see you even explicitly disabled ASIO threading. Good.
One way - short of providing an Asio service to implement this behaviour - would be to use timers to emulate condition variables. You could use a timer that "never" expires (deadline at time_point::max()) and manually reset it to time_point::min() (canceling any pending async_wait) or to any time in the past to signal the condition. Then you can use Timer::async_wait with use_awaitable as you already know how.
Note that you still need to "manually" signal the change. This is what you want, because anything else would require kernel process-tracing support or hardware debugger facilities, which require massive privileges and tend to be very slow.
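Here is a minimal, self-contained sketch of that pattern (the producer/consumer names, the 100 ms pacing, and the three-message demo are illustrative, not part of the question's code):
#include <boost/asio.hpp>
#include <chrono>
#include <deque>
#include <iostream>
#include <string>
namespace asio = boost::asio;
// The timer acts as the condition variable: its deadline is "never",
// so async_wait only completes when somebody cancels it.
asio::awaitable<void> consumer(asio::steady_timer& cv, std::deque<std::string>& queue)
{
    for (int received = 0; received < 3;)
    {
        boost::system::error_code ec;
        co_await cv.async_wait(asio::redirect_error(asio::use_awaitable, ec));
        // operation_aborted here means "condition signalled", not an error
        while (!queue.empty())
        {
            std::cout << "got: " << queue.front() << std::endl;
            queue.pop_front();
            ++received;
        }
    }
}
asio::awaitable<void> producer(asio::steady_timer& cv, std::deque<std::string>& queue)
{
    asio::steady_timer pause{ co_await asio::this_coro::executor };
    for (int i = 0; i < 3; ++i)
    {
        pause.expires_after(std::chrono::milliseconds(100));
        co_await pause.async_wait(asio::use_awaitable);
        queue.push_back("message " + std::to_string(i));
        cv.cancel(); // "notify": wakes the consumer's async_wait
    }
}
int main()
{
    asio::io_context ctx(1);
    asio::steady_timer cv(ctx, asio::steady_timer::time_point::max());
    std::deque<std::string> queue;
    co_spawn(ctx, consumer(cv, queue), asio::detached);
    co_spawn(ctx, producer(cv, queue), asio::detached);
    ctx.run();
}
Because everything runs on a single io_context thread and the drain loop never suspends, a notification cannot be lost between the emptiness check and the wait; with multiple threads you would want a strand around the queue and the timer.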
You might want to know about associating use_awaitable as the default completion token for the executor bound to your timer. See e.g. the examples: https://www.boost.org/doc/libs/1_78_0/doc/html/boost_asio/example/cpp17/coroutines_ts/echo_server_with_default.cpp (the HTML docs do NOT link these examples)
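As a minimal illustration of what that buys you (mirroring the question's CoroTimer typedef, with steady_timer standing in for the system-clock timer):
#include <boost/asio.hpp>
#include <chrono>
// Bind use_awaitable as the default completion token of the timer type:
using CoroTimer = boost::asio::use_awaitable_t<>::as_default_on_t<boost::asio::steady_timer>;
boost::asio::awaitable<void> demo()
{
    CoroTimer timer{ co_await boost::asio::this_coro::executor };
    timer.expires_after(std::chrono::milliseconds(100));
    co_await timer.async_wait(); // no explicit use_awaitable needed
}
int main()
{
    boost::asio::io_context ctx(1);
    co_spawn(ctx, demo(), boost::asio::detached);
    ctx.run();
}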

Related

asio use_future instead of yield[ec]

I want to make a container of futures, where each future is the void result of a task, so that I can use wait_for_any on the container. Each task is a coroutine that I currently implement using a yield_context; inside the coroutine there is an initiating function that returns an ec and a result, and I use the ec to analyze the result. Then another coroutine is called, passing the same yield_context.
I want to know how to make this design work.
And if I use use_future, how can I pass the error code to ec instead of throwing it? If there is no way except throwing, I will put a try/catch around the async initiating functions.
All these tasks will be posted, spawned, etc. on the asio io_service.
These are the main parts of my code.
This is the spawn of the task:
boost::asio::spawn(GetServiceReference(), boost::bind(&HTTPRequest::Execute, boost::placeholders::_1, m_HttpClient_request_name, Get_mHTTPClient_Responses_Map()));
And this is the coroutine using the yield_context:
void HTTPRequest::Execute(boost::asio::yield_context yield_r, std::string request_name, std::map<std::string, boost::shared_ptr<HTTPResponse>>& mHTTPClient_Responses_Map)
{
resolver_iterator iterator_connect = boost::asio::async_connect(mSock, iterator_resolve, yield_r[ec]);
}
And inside Execute we use ec to analyze the result:
if (ec == boost::system::errc::errc_t::success){}
And here we start another coroutine, passing the same yield_context:
SendRequest(yield_r);
}
I want to change this so that I have a container of futures for all spawned Execute calls. I do not care about the results of Execute, because I put them into the member class Response.
But I need the result in a future so that I can use wait_any on the container.
If you can change your implementation, use the async_result pattern.
This makes it so you can use your method with any of the approaches (completion handler, yield context or use_future).
I reproduce the self-contained example from here for inspiration:
Comprehensive Demo
Showing how to use it with:
coro's and yield[ec]
coro's and yield + exceptions
std::future
completion handlers
Live On Coliru
#define BOOST_COROUTINES_NO_DEPRECATION_WARNING
#include <iostream>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/use_future.hpp>
using boost::system::error_code;
namespace asio = boost::asio;
template <typename Token>
auto async_meaning_of_life(bool success, Token&& token)
{
#if BOOST_VERSION >= 106600
using result_type = typename asio::async_result<std::decay_t<Token>, void(error_code, int)>;
typename result_type::completion_handler_type handler(std::forward<Token>(token));
result_type result(handler);
#else
typename asio::handler_type<Token, void(error_code, int)>::type
handler(std::forward<Token>(token));
asio::async_result<decltype (handler)> result (handler);
#endif
if (success)
handler(error_code{}, 42);
else
handler(asio::error::operation_aborted, 0);
return result.get ();
}
void using_yield_ec(asio::yield_context yield) {
for (bool success : { true, false }) {
boost::system::error_code ec;
auto answer = async_meaning_of_life(success, yield[ec]);
std::cout << __FUNCTION__ << ": Result: " << ec.message() << "\n";
std::cout << __FUNCTION__ << ": Answer: " << answer << "\n";
}
}
void using_yield_catch(asio::yield_context yield) {
for (bool success : { true, false })
try {
auto answer = async_meaning_of_life(success, yield);
std::cout << __FUNCTION__ << ": Answer: " << answer << "\n";
} catch(boost::system::system_error const& e) {
std::cout << __FUNCTION__ << ": Caught: " << e.code().message() << "\n";
}
}
void using_future() {
for (bool success : { true, false })
try {
auto answer = async_meaning_of_life(success, asio::use_future);
std::cout << __FUNCTION__ << ": Answer: " << answer.get() << "\n";
} catch(boost::system::system_error const& e) {
std::cout << __FUNCTION__ << ": Caught: " << e.code().message() << "\n";
}
}
void using_handler() {
for (bool success : { true, false })
async_meaning_of_life(success, [](error_code ec, int answer) {
std::cout << "using_handler: Result: " << ec.message() << "\n";
std::cout << "using_handler: Answer: " << answer << "\n";
});
}
int main() {
asio::io_service svc;
spawn(svc, using_yield_ec);
spawn(svc, using_yield_catch);
std::thread work([] {
using_future();
using_handler();
});
svc.run();
work.join();
}
Prints:
using_yield_ec: Result: Success
using_yield_ec: Answer: 42
using_yield_ec: Result: Operation canceled
using_yield_ec: Answer: 0
using_future: Answer: 42
using_yield_catch: Answer: 42
using_yield_catch: Caught: Operation canceled
using_future: Caught: Operation canceled
using_handler: Result: Success
using_handler: Answer: 42
using_handler: Result: Operation canceled
using_handler: Answer: 0
Note: for simplicity, I have not added output synchronization, so the output can become intermingled depending on runtime execution order.
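Regarding the container-of-futures part of the question: asio::use_future produces std::future, which has no wait-for-any facility, so either collect boost::futures (and use boost::wait_for_any) or poll. A sketch of the polling approach, with std::async standing in for the spawned tasks (illustrative only):
#include <chrono>
#include <future>
#include <iostream>
#include <thread>
#include <vector>
int main()
{
    std::vector<std::future<int>> tasks;
    for (int i = 0; i < 3; ++i)
        tasks.push_back(std::async(std::launch::async, [i] {
            std::this_thread::sleep_for(std::chrono::milliseconds(100 * (3 - i)));
            return i;
        }));
    // Emulate wait_for_any by polling each future with a short timeout:
    for (bool done = false; !done;)
        for (auto& f : tasks)
            if (f.valid() && f.wait_for(std::chrono::milliseconds(10)) == std::future_status::ready)
            {
                std::cout << "first ready: " << f.get() << std::endl;
                done = true;
                break;
            }
}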

UDP broadcasting loses more packets than received?

Scenario:
I have a simple program that broadcasts a little message over the broadcast interface on p2p-wlan0-0 on Linux. The program follows.
Code:
#include <atomic>
#include <boost/asio.hpp>
#include <boost/system/error_code.hpp>
#include <chrono>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
boost::asio::io_service ioService;
boost::asio::ip::udp::socket udpSocket = boost::asio::ip::udp::socket(ioService);
boost::asio::ip::udp::endpoint endPoint;
void Prepare() {
const int port = 36512; // Some port
const std::string broadcastIPAddress = "192.168.1.1";
udpSocket.open(boost::asio::ip::udp::v4());
udpSocket.set_option(boost::asio::socket_base::reuse_address(true));
udpSocket.set_option(boost::asio::socket_base::broadcast(true));
boost::asio::ip::address_v4 address = boost::asio::ip::address_v4::from_string(broadcastIPAddress);
boost::asio::ip::address_v4 broadcastAddressV4 = boost::asio::ip::address_v4::broadcast(address,
boost::asio::ip::address_v4::from_string("255.255.255.0")) ;
endPoint = boost::asio::ip::udp::endpoint(broadcastAddressV4, port);
boost::asio::ip::address interfaceIPAddress(boost::asio::ip::address_v4::from_string(broadcastIPAddress));
boost::system::error_code ec;
udpSocket.set_option(boost::asio::ip::multicast::outbound_interface(interfaceIPAddress.to_v4()), ec);
if (ec) {
throw std::runtime_error(ec.message());
}
}
void Broadcast(const std::string& messageToBroadcast) {
boost::system::error_code ec;
udpSocket.send_to(boost::asio::buffer(messageToBroadcast.c_str(), messageToBroadcast.length()), endPoint, 0, ec);
if (ec) {
throw std::runtime_error(ec.message());
}
std::cout << "Broadcasted udp message " << messageToBroadcast << std::endl;
}
int main() {
std::cout << "Program starts" << std::endl;
std::atomic<bool> stopped{false};
std::thread t([&stopped](){
std::cout << "Broadcasting thread starting up" << std::endl;
Prepare();
do {
Broadcast("hello");
std::this_thread::sleep_for(std::chrono::seconds(1));
} while(!stopped.load());
});
std::this_thread::sleep_for(std::chrono::minutes(1));
stopped.store(true);
t.join();
std::cout << "Program ends" << std::endl;
}
The receiver is a loop blindly receiving data, and the whole thing works: I am able to send and receive packets.
Problem:
But I lose a lot of packets over the broadcasting interface. It's like only one in 10 packets is received. I am surprised, because it's a p2p connection: it takes me 10 broadcasts to get one received on the receiver side. Really strange!
Is this how it's supposed to work, by its very nature? Is UDP broadcasting so unreliable? Or am I making a mistake in the way I have set up the UDP socket?

SIGSEGV on second call to boost::asio::udp socket::async_recv on worker boost::thread

I get a SIGSEGV in the following class the second time I call start_receive(). It works correctly in my open() function, but seems to fail when input is received and I try to restart listening for more input:
#0 0x0000555555584154 in boost::asio::basic_io_object<boost::asio::datagram_socket_service<boost::asio::ip::udp>, true>::get_service (this=0x100007f00000000)
at /usr/include/boost/asio/basic_io_object.hpp:225
#1 0x000055555558398b in boost::asio::basic_datagram_socket<boost::asio::ip::udp, boost::asio::datagram_socket_service<boost::asio::ip::udp> >::async_receive_from<boost::asio::mutable_buffers_1, boost::_bi::bind_t<int, boost::_mfi::mf2<int, Vast::net_udpNC_MChandler, boost::system::error_code const&, unsigned long>, boost::_bi::list3<boost::_bi::value<Vast::net_udpNC_MChandler*>, boost::arg<1> (*)(), boost::arg<2> (*)()> > > (this=0x100007f00000000, buffers=...,
sender_endpoint=..., handler=...)
at /usr/include/boost/asio/basic_datagram_socket.hpp:895
#2 0x000055555557a889 in Vast::net_udpNC_MChandler::start_receive (
this=0x7fffffff5c70) at net_udpnc_mchandler.cpp:58
#3 0x000055555557aa77 in Vast::net_udpNC_MChandler::handle_input (
this=0x7fffffff5c70, error=..., bytes_transferred=24)
at net_udpnc_mchandler.cpp:100
#4 0x000055555557abb3 in Vast::net_udpNC_MChandler::handle_buffer (
this=0x7fffffff5c70, buf=0x7fffffffdad0 "\035\300", bytes_transferred=24)
at net_udpnc_mchandler.cpp:114
#5 0x000055555556397f in test_process_encoded ()
at unittest_net_udpnc_mchandler.cpp:43
#6 0x000055555556400e in main () at unittest_net_udpnc_mchandler.cpp:101
Header:
class net_udpNC_MChandler
{
public:
net_udpNC_MChandler(ip::udp::endpoint local_endpoint);
//MChandler will run its own io_service
int open (AbstractRLNCMsgReceiver *msghandler);
int handle_buffer (char *buf, std::size_t bytes_transferred);
protected:
//Start the receiving loop
void start_receive ();
// handling incoming message
int handle_input (const boost::system::error_code& error,
std::size_t bytes_transferred);
private:
ip::udp::socket *_udp;
ip::udp::endpoint _remote_endpoint_;
ip::udp::endpoint _local_endpoint;
ip::udp::endpoint MC_address;
char _buf[VAST_BUFSIZ];
AbstractRLNCMsgReceiver *_msghandler = NULL;
io_service *_io_service;
boost::thread *_iosthread;
};
Source file:
net_udpNC_MChandler::net_udpNC_MChandler(ip::udp::endpoint local_endpoint) :
MC_address(ip::address::from_string("239.255.0.1"), 1037)
{
_io_service = new io_service();
_local_endpoint = local_endpoint;
}
int net_udpNC_MChandler::open(AbstractRLNCMsgReceiver *msghandler) {
_msghandler = msghandler;
if (_udp == NULL) {
_udp = new ip::udp::socket(*_io_service);
_udp->open(ip::udp::v4());
_udp->set_option(ip::udp::socket::reuse_address(true));
_udp->set_option(ip::multicast::join_group(MC_address.address ()));
boost::system::error_code ec;
_udp->bind(MC_address, ec);
std::cout << "net_udpnc_mchandler::open " + ec.message() << std::endl;
if (ec)
{
std::cout << "net_udpnc_mchandler:: open MC address failed" << ec.message() << std::endl;
}
//Add async receive to io_service queue
start_receive();
std::cout << "net_udpnc_mchandler::open _udp->_local_endpoint: " << _udp->local_endpoint() << " _local_endpoint" << _local_endpoint << std::endl;
//Start the thread handling async receives
_iosthread = new boost::thread(boost::bind(&boost::asio::io_service::run, _io_service));
}
return 0;
}
void net_udpNC_MChandler::start_receive()
{
_udp->async_receive_from(
boost::asio::buffer(_buf, VAST_BUFSIZ), _remote_endpoint_,
boost::bind(&net_udpNC_MChandler::handle_input, this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred));
}
// handling incoming message
int net_udpNC_MChandler::handle_input (const boost::system::error_code& error,
std::size_t bytes_transferred)
{
RLNCHeader header;
if (!error)
{
//Store UDP messages
char *p = _buf;
memcpy(&header, p, sizeof(RLNCHeader));
if (RLNCHeader_factory::isRLNCHeader (header) && header.enc_packet_count > 1)
{
CPPDEBUG("net_udpnc_mchandler::handle_input: Encoded packet received" << std::endl);
process_encoded (bytes_transferred);
}
//Restart waiting for new packets
start_receive();
}
else {
CPPDEBUG("Error on UDP socket receive: " << error.message() << std::endl;);
}
return -1;
}
The strangest thing is that everything works if I use a default constructor without arguments (i.e. no local_endpoint); the SIGSEGV does not appear. But as soon as I change the constructor to the current one, I get the SIGSEGV.
The _io_service is a class member and it does not get destructed anywhere but in the destructor, so I do not know how I can get a SIGSEGV for it...
Is there some requirement that the handler class must have a no-argument constructor?

DBus client and server in the same process

When I create a D-Bus server (via g_bus_own_name()) and a client to it (using g_dbus_proxy_new()) in the same process and then call g_dbus_proxy_call_sync(), the call never returns. However, if the server and client are in separate processes, everything is fine.
The following code illustrates my problem (I am using giomm C++ bindings here):
file main.cc:
#include <giomm.h>
#include <thread>
#include <unistd.h> // for sleep()
int server_main();
int client_main();
int main() {
Gio::init();
std::thread thr_server([](){ server_main(); });
sleep(1); // give the server some time to register
std::thread thr_client([](){ client_main(); });
sleep(10); // wait for the client to finish
}
file server.cc:
#include <giomm.h>
#include <iostream>
namespace {
static Glib::RefPtr<Gio::DBus::NodeInfo> introspection_data;
static Glib::ustring introspection_xml =
"<node name='/org/glibmm/DBusExample'>"
" <interface name='org.glibmm.DBusExample'>"
" <method name='Method'>"
" </method>"
" </interface>"
"</node>";
guint registered_id = 0;
}
static void on_method_call(const Glib::RefPtr<Gio::DBus::Connection>& /* connection */,
const Glib::ustring& /* sender */, const Glib::ustring& /* object_path */,
const Glib::ustring& /* interface_name */, const Glib::ustring& method_name,
const Glib::VariantContainerBase& parameters,
const Glib::RefPtr<Gio::DBus::MethodInvocation>& invocation)
{
if(method_name == "Method") {
std::cout << "Method was called\n";
}
}
const Gio::DBus::InterfaceVTable interface_vtable(sigc::ptr_fun(&on_method_call));
void on_bus_acquired(const Glib::RefPtr<Gio::DBus::Connection>& connection, const Glib::ustring& /* name */)
{
std::cout << "on_bus_acquired\n";
try {
registered_id = connection->register_object("/org/glibmm/DBusExample",
introspection_data->lookup_interface(),
interface_vtable);
}
catch(const Glib::Error& ex) {
std::cerr << "Registration of object failed." << std::endl;
}
return;
}
void on_name_acquired(const Glib::RefPtr<Gio::DBus::Connection>& /* connection */, const Glib::ustring& /* name */)
{}
void on_name_lost(const Glib::RefPtr<Gio::DBus::Connection>& connection, const Glib::ustring& /* name */) {
connection->unregister_object(registered_id);
}
int server_main()
{
try {
introspection_data = Gio::DBus::NodeInfo::create_for_xml(introspection_xml);
}
catch(const Glib::Error& ex) {
std::cerr << "Unable to create introspection data: " << ex.what() <<
"." << std::endl;
return 1;
}
const guint id = Gio::DBus::own_name(Gio::DBus::BUS_TYPE_SESSION,
"org.glibmm.DBusExample",
sigc::ptr_fun(&on_bus_acquired),
sigc::ptr_fun(&on_name_acquired),
sigc::ptr_fun(&on_name_lost));
//Keep the service running
auto loop = Glib::MainLoop::create();
loop->run();
Gio::DBus::unown_name(id);
return EXIT_SUCCESS;
}
file client.cc:
#include <giomm.h>
#include <iostream>
Glib::RefPtr<Glib::MainLoop> loop;
// A main loop idle callback to quit when the main loop is idle.
bool on_main_loop_idle() {
std::cout << "loop_idle\n";
loop->quit();
return false;
}
void on_dbus_proxy_available(Glib::RefPtr<Gio::AsyncResult>& result)
{
auto proxy = Gio::DBus::Proxy::create_finish(result);
if(!proxy) {
std::cerr << "The proxy to the user's session bus was not successfully "
"created." << std::endl;
loop->quit();
return;
}
try {
std::cout << "Calling...\n";
proxy->call_sync("Method");
std::cout << "It works!\n";
}
catch(const Glib::Error& error) {
std::cerr << "Got an error: '" << error.what() << "'." << std::endl;
}
// Connect an idle callback to the main loop to quit when the main loop is
// idle now that the method call is finished.
Glib::signal_idle().connect(sigc::ptr_fun(&on_main_loop_idle));
}
int client_main() {
loop = Glib::MainLoop::create();
auto connection =
Gio::DBus::Connection::get_sync(Gio::DBus::BUS_TYPE_SESSION);
if(!connection) {
std::cerr << "The user's session bus is not available." << std::endl;
return 1;
}
// Create the proxy to the bus asynchronously.
Gio::DBus::Proxy::create(connection, "org.glibmm.DBusExample",
"/org/glibmm/DBusExample", "org.glibmm.DBusExample",
sigc::ptr_fun(&on_dbus_proxy_available));
loop->run();
return EXIT_SUCCESS;
}
I compile the test with g++ -O2 -std=c++0x main.cc server.cc client.cc -o test $(pkg-config --cflags --libs giomm-2.4) and run:
./test
on_bus_acquired
Calling...
<it hangs>
However, when I change main.cc:
#include <giomm.h>
#include <unistd.h> // for fork() and sleep()
int server_main();
int client_main();
int main() {
Gio::init();
auto childid = fork();
if (childid == 0) {
server_main();
} else {
sleep(1);
client_main();
}
}
I get:
./test
on_bus_acquired
Calling...
Method was called
It works!
So call_sync() returns successfully.
I tried to exclude loops from server and client, and use a single-threaded main.cc:
#include <giomm.h>
#include <thread>
int server_main();
int client_main();
int main() {
Gio::init();
server_main();
client_main();
auto loop = Glib::MainLoop::create();
loop->run();
}
Nothing helps. The question is: what am I doing wrong? I want to use my D-Bus server and client in one process.
I figured it out; the trick is to execute
Glib::VariantContainerBase result;
invocation->return_value(result);
at the end of on_method_call.
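For reference, here is the server's on_method_call with that fix applied (a sketch based on the code above):
static void on_method_call(const Glib::RefPtr<Gio::DBus::Connection>& /* connection */,
const Glib::ustring& /* sender */, const Glib::ustring& /* object_path */,
const Glib::ustring& /* interface_name */, const Glib::ustring& method_name,
const Glib::VariantContainerBase& /* parameters */,
const Glib::RefPtr<Gio::DBus::MethodInvocation>& invocation)
{
if(method_name == "Method") {
std::cout << "Method was called\n";
}
// Always send a reply; without it, the client's call_sync() blocks forever
// waiting for the D-Bus reply message.
Glib::VariantContainerBase result;
invocation->return_value(result);
}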

Using Point Cloud Library to store Point Clouds from Kinect

Using the Point Cloud Library on Ubuntu, I am trying to take multiple point clouds from the Kinect and store them in memory for later use in the program. My code, shown at the bottom of this post, is designed to store the first point cloud from the Kinect and output its width and height. The program gives me a runtime error:
/usr/include/boost/smart_ptr/shared_ptr.hpp:418: T* boost::shared_ptr<T>::operator->() const [with T = pcl::PointCloud<pcl::PointXYZ>]: Assertion `px != 0' failed.
All help is greatly appreciated and I always accept an answer!
The code:
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>
class SimpleOpenNIViewer
{
public:
SimpleOpenNIViewer () : viewer ("PCL OpenNI Viewer") {}
pcl::PointCloud<pcl::PointXYZ>::Ptr prevCloud;
void cloud_cb_ (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud)
{
if (!viewer.wasStopped())
viewer.showCloud (cloud);
//ICP start
if(!prevCloud) {
pcl::PointCloud<pcl::PointXYZ>::Ptr prevCloud( new pcl::PointCloud<pcl::PointXYZ>());
pcl::copyPointCloud<pcl::PointXYZ, pcl::PointXYZ>(*cloud, *prevCloud);
}
cout << prevCloud->width << " by " << prevCloud->height << endl;
}
void run ()
{
pcl::Grabber* interface = new pcl::OpenNIGrabber();
boost::function<void (const pcl::PointCloud<pcl::PointXYZ>::ConstPtr&)> f =
boost::bind (&SimpleOpenNIViewer::cloud_cb_, this, _1);
interface->registerCallback (f);
interface->start ();
while (!viewer.wasStopped())
{
boost::this_thread::sleep (boost::posix_time::seconds (1));
}
interface->stop ();
}
pcl::visualization::CloudViewer viewer;
};
int main ()
{
SimpleOpenNIViewer v;
v.run ();
return 0;
}
Try this; I don't have the Kinect drivers installed, so I can't test. Basically, in my version the callback allocates the member prevCloud on the first frame and copies each new cloud into it, so after the first callback prevCloud.get() != NULL and dereferencing it is safe.
#include <pcl/io/openni_grabber.h>
#include <pcl/visualization/cloud_viewer.h>
class SimpleOpenNIViewer
{
typedef pcl::PointXYZ Point;
typedef pcl::PointCloud<Point> PointCloud;
public:
SimpleOpenNIViewer () : viewer ("PCL OpenNI Viewer") {} // prevCloud default-constructs to a null Ptr
void cloud_cb_ (const PointCloud::ConstPtr &cloud)
{
if (!viewer.wasStopped())
viewer.showCloud (cloud);
if (!prevCloud) // init previous cloud if first frame
prevCloud = PointCloud::Ptr(new PointCloud);
// else RunICP between cloud and prevCloud:
//RunICP(cloud, prevCloud);
//Copy the new frame into prevCloud
pcl::copyPointCloud<Point, Point>(*cloud, *prevCloud);
cout << prevCloud->width << " by " << prevCloud->height << endl;
}
void run ()
{
pcl::Grabber* interface = new pcl::OpenNIGrabber();
boost::function<void (const PointCloud::ConstPtr&)> f =
boost::bind (&SimpleOpenNIViewer::cloud_cb_, this, _1);
interface->registerCallback (f);
interface->start ();
while (!viewer.wasStopped())
{
boost::this_thread::sleep (boost::posix_time::seconds (1));
}
interface->stop ();
}
PointCloud::Ptr prevCloud;
pcl::visualization::CloudViewer viewer;
};
int main ()
{
SimpleOpenNIViewer v;
v.run ();
return 0;
}
You're creating a new local variable prevCloud and copying cloud into it, rather than copying into the prevCloud member. So if the member's value was null before the if {}, it is still null after the if {}, and the assertion fires when you try to dereference it.
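In other words, initialize and copy into the member instead of declaring a new local inside cloud_cb_ (a minimal sketch of that fix):
//ICP start
if(!prevCloud) {
// assign to the member; do not declare a local that shadows it
prevCloud.reset(new pcl::PointCloud<pcl::PointXYZ>());
}
pcl::copyPointCloud<pcl::PointXYZ, pcl::PointXYZ>(*cloud, *prevCloud);
cout << prevCloud->width << " by " << prevCloud->height << endl;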
Maybe this code can help you: the cloud is saved in a "pcd" file, take a look here.
Another option is to work with the "Kinfu" project from PCL.