SIGSEGV on second call to boost::asio::udp::socket::async_recv on worker boost::thread - boost-asio

I get a SIGSEGV in the following class the second time I call start_receive(). It works correctly from my open() function, but it seems to fail when input is received and I try to restart listening for more input:
#0 0x0000555555584154 in boost::asio::basic_io_object<boost::asio::datagram_socket_service<boost::asio::ip::udp>, true>::get_service (this=0x100007f00000000)
at /usr/include/boost/asio/basic_io_object.hpp:225
#1 0x000055555558398b in boost::asio::basic_datagram_socket<boost::asio::ip::udp, boost::asio::datagram_socket_service<boost::asio::ip::udp> >::async_receive_from<boost::asio::mutable_buffers_1, boost::_bi::bind_t<int, boost::_mfi::mf2<int, Vast::net_udpNC_MChandler, boost::system::error_code const&, unsigned long>, boost::_bi::list3<boost::_bi::value<Vast::net_udpNC_MChandler*>, boost::arg<1> (*)(), boost::arg<2> (*)()> > > (this=0x100007f00000000, buffers=...,
sender_endpoint=..., handler=...)
at /usr/include/boost/asio/basic_datagram_socket.hpp:895
#2 0x000055555557a889 in Vast::net_udpNC_MChandler::start_receive (
this=0x7fffffff5c70) at net_udpnc_mchandler.cpp:58
#3 0x000055555557aa77 in Vast::net_udpNC_MChandler::handle_input (
this=0x7fffffff5c70, error=..., bytes_transferred=24)
at net_udpnc_mchandler.cpp:100
#4 0x000055555557abb3 in Vast::net_udpNC_MChandler::handle_buffer (
this=0x7fffffff5c70, buf=0x7fffffffdad0 "\035\300", bytes_transferred=24)
at net_udpnc_mchandler.cpp:114
#5 0x000055555556397f in test_process_encoded ()
at unittest_net_udpnc_mchandler.cpp:43
#6 0x000055555556400e in main () at unittest_net_udpnc_mchandler.cpp:101
Header:
class net_udpNC_MChandler
{
public:
    net_udpNC_MChandler(ip::udp::endpoint local_endpoint);
    //MChandler will run its own io_service
    int open (AbstractRLNCMsgReceiver *msghandler);
    int handle_buffer (char *buf, std::size_t bytes_transferred);

protected:
    //Start the receiving loop
    void start_receive ();
    // handling incoming message
    int handle_input (const boost::system::error_code& error,
                      std::size_t bytes_transferred);

private:
    ip::udp::socket *_udp;
    ip::udp::endpoint _remote_endpoint_;
    ip::udp::endpoint _local_endpoint;
    ip::udp::endpoint MC_address;
    char _buf[VAST_BUFSIZ];
    AbstractRLNCMsgReceiver *_msghandler = NULL;
    io_service *_io_service;
    boost::thread *_iosthread;
};
Source file:
net_udpNC_MChandler::net_udpNC_MChandler(ip::udp::endpoint local_endpoint) :
    MC_address(ip::address::from_string("239.255.0.1"), 1037)
{
    _io_service = new io_service();
    _local_endpoint = local_endpoint;
}

int net_udpNC_MChandler::open(AbstractRLNCMsgReceiver *msghandler) {
    _msghandler = msghandler;
    if (_udp == NULL) {
        _udp = new ip::udp::socket(*_io_service);
        _udp->open(ip::udp::v4());
        _udp->set_option(ip::udp::socket::reuse_address(true));
        _udp->set_option(ip::multicast::join_group(MC_address.address ()));

        boost::system::error_code ec;
        _udp->bind(MC_address, ec);
        std::cout << "net_udpnc_mchandler::open " + ec.message() << std::endl;
        if (ec)
        {
            std::cout << "net_udpnc_mchandler:: open MC address failed" << ec.message() << std::endl;
        }

        //Add async receive to io_service queue
        start_receive();
        std::cout << "net_udpnc_mchandler::open _udp->_local_endpoint: " << _udp->local_endpoint() << " _local_endpoint" << _local_endpoint << std::endl;

        //Start the thread handling async receives
        _iosthread = new boost::thread(boost::bind(&boost::asio::io_service::run, _io_service));
    }
    return 0;
}
void net_udpNC_MChandler::start_receive()
{
    _udp->async_receive_from(
        boost::asio::buffer(_buf, VAST_BUFSIZ), _remote_endpoint_,
        boost::bind(&net_udpNC_MChandler::handle_input, this,
                    boost::asio::placeholders::error,
                    boost::asio::placeholders::bytes_transferred));
}

// handling incoming message
int net_udpNC_MChandler::handle_input (const boost::system::error_code& error,
                                       std::size_t bytes_transferred)
{
    RLNCHeader header;
    if (!error)
    {
        //Store UDP messages
        char *p = _buf;
        memcpy(&header, p, sizeof(RLNCHeader));
        if (RLNCHeader_factory::isRLNCHeader (header) && header.enc_packet_count > 1)
        {
            CPPDEBUG("net_udpnc_mchandler::handle_input: Encoded packet received" << std::endl);
            process_encoded (bytes_transferred);
        }

        //Restart waiting for new packets
        start_receive();
    }
    else {
        CPPDEBUG("Error on UDP socket receive: " << error.message() << std::endl;);
    }
    return -1;
}
The strangest thing is that if I use a default constructor without arguments (i.e. no local_endpoint), this SIGSEGV does not appear. But as soon as I change the constructor to the current one, I get the SIGSEGV.
The _io_service is a class member and it is not destroyed anywhere except in the destructor, so I do not know how I can get a SIGSEGV from it...
Is there some requirement on the handler class that it must have a no-argument constructor?

Related

Is there a way to specify the ssh port to be used by libgit2

This code works perfectly well if I connect over SSH on port 22 (the usual default SSH port).
But for some reason I need to connect over SSH on port 443, and then the code no longer works: it always tries to connect on port 22.
So I am asking whether there is a way to tell libgit2 to use port 443 for the SSH connection.
int creds(git_cred **out, const char *url, const char *username_from_url,
          unsigned int allowed_types, void *payload) {
    std::cout << "Calling creds" << std::endl;
    int result = git_credential_ssh_key_new(out, username_from_url, "path/to/ssh_key_public", "path/to/ssh_key_private", "");
    if (result != 0) {
        std::cout << giterr_last()->message << std::endl;
        throw;
        return result;
    }
    return result;
}
int main() {
    git_libgit2_init();
    git_repository *repo = nullptr;
    git_remote *local_remote = nullptr;
    std::string repo_path = "/Users/luclambour/Desktop/repo_test_libgit/git-test-clone";
    int error = 0;

    error = git_repository_open(&repo, repo_path.data());
    std::cout << error << std::endl;
    if (error != 0) { std::cout << giterr_last()->message << std::endl; }

    error = git_remote_create_anonymous(&local_remote, repo, "root@138.1.1.1:/root/luc_clone_des/clone_simple");
    if (error != 0) { std::cout << giterr_last()->message << std::endl; }

    git_remote_callbacks callbacks = GIT_REMOTE_CALLBACKS_INIT;
    callbacks.credentials = creds;
    error = git_remote_connect(local_remote, GIT_DIRECTION_PUSH, &callbacks, NULL, NULL);
    if (error != 0) { std::cout << giterr_last()->message << std::endl; }

    git_push_options opts = GIT_PUSH_OPTIONS_INIT;
    opts.callbacks.credentials = creds;
    opts.proxy_opts.type = GIT_PROXY_NONE;
    error = git_remote_push(local_remote, NULL, &opts);
    if (error != 0) { std::cout << giterr_last()->message << std::endl; }

    git_remote_free(local_remote);
    git_repository_free(repo);
    git_libgit2_shutdown();
    return 0;
}
The scp-like syntax doesn't support a port number, so you should use the ssh:// URL syntax instead:
git_remote_create_anonymous(&local_remote, repo, "ssh://root@138.1.1.1:443/root/luc_clone_des/clone_simple");
Details can be found in the "GIT URLS" section of git-clone(1).
https://git-scm.com/docs/git-clone#_git_urls
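For reference, the relevant forms listed there are ssh://[user@]host.xz[:port]/path/to/repo.git/ (a port can be specified) versus the scp-like [user@]host.xz:path/to/repo.git/ (no port possible).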

How to co_await for a change in a variable using boost coroutine ts?

Context
I am building a webserver using boost coroutine ts, boost asio and boost beast.
There is a coroutine for reading and one for writing.
There is a message_to_send queue onto which messages that should go to the user are pushed.
The writing coroutine checks if there is something in the message_to_send queue and sends it.
After sending, the writing coroutine suspends itself for 100 milliseconds and then checks again for something to write.
Problem
The writing coroutine is polling the message queue every 100 milliseconds. I would like to find a solution that does not poll every time a timer fires.
Possible solution
Maybe there is a way to co_await a change of a variable, perhaps by creating an async_wait_for_callback with "async_initiate"?
Code example
You can clone the project, or use the complete example code posted here:
#include <algorithm>
#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/system_timer.hpp>
#include <boost/beast.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/bind/bind.hpp>
#include <boost/optional.hpp>
#include <chrono>
#include <cstddef>
#include <deque>
#include <exception>
#include <iostream>
#include <list>
#include <memory>
#include <set>
#include <stdexcept>
#include <string>
// TODO use cmake to find out if the compiler is gcc or clang
#include <coroutine> // enable if built with gcc
// #include <experimental/coroutine> // enable if built with clang

using namespace boost::beast;
using namespace boost::asio;
using boost::asio::ip::tcp;

typedef boost::asio::use_awaitable_t<>::as_default_on_t<boost::asio::basic_waitable_timer<boost::asio::chrono::system_clock>> CoroTimer;
typedef boost::beast::websocket::stream<boost::beast::tcp_stream> Websocket;
using tcp_acceptor = use_awaitable_t<>::as_default_on_t<tcp::acceptor>;
struct User
{
    boost::asio::awaitable<void> writeToClient (std::weak_ptr<Websocket> &connection);

    std::deque<std::string> msgQueue{};
    std::shared_ptr<CoroTimer> timer{};
};
void
handleMessage (std::string const &msg, std::list<std::shared_ptr<User>> &users, std::shared_ptr<User> user)
{
    std::cout << "please implement handle message" << std::endl;
    user->msgQueue.push_back ("please implement handle message");
    user->timer->cancel ();
}
boost::asio::awaitable<void>
User::writeToClient (std::weak_ptr<Websocket> &connection)
{
    try
    {
        while (not connection.expired ())
        {
            timer = std::make_shared<CoroTimer> (CoroTimer{ co_await this_coro::executor });
            timer->expires_after (std::chrono::system_clock::time_point::max () - std::chrono::system_clock::now ());
            try
            {
                co_await timer->async_wait ();
            }
            catch (boost::system::system_error &e)
            {
                using namespace boost::system::errc;
                if (operation_canceled == e.code ())
                {
                    // swallow cancel
                }
                else
                {
                    std::cout << "error in timer boost::system::errc: " << e.code () << std::endl;
                    abort ();
                }
            }
            while (not msgQueue.empty () && not connection.expired ())
            {
                auto tmpMsg = std::move (msgQueue.front ());
                std::cout << " msg: " << tmpMsg << std::endl;
                msgQueue.pop_front ();
                co_await connection.lock ()->async_write (buffer (tmpMsg), use_awaitable);
            }
        }
    }
    catch (std::exception &e)
    {
        std::cout << "write Exception: " << e.what () << std::endl;
    }
}
class Server
{
public:
    Server (boost::asio::ip::tcp::endpoint const &endpoint);
    boost::asio::awaitable<void> listener ();

private:
    void removeUser (std::list<std::shared_ptr<User>>::iterator user);
    boost::asio::awaitable<std::string> my_read (Websocket &ws_);
    boost::asio::awaitable<void> readFromClient (std::list<std::shared_ptr<User>>::iterator user, Websocket &connection);

    boost::asio::ip::tcp::endpoint _endpoint{};
    std::list<std::shared_ptr<User>> users{};
};
namespace this_coro = boost::asio::this_coro;

Server::Server (boost::asio::ip::tcp::endpoint const &endpoint) : _endpoint{ endpoint } {}

awaitable<std::string>
Server::my_read (Websocket &ws_)
{
    std::cout << "read" << std::endl;
    flat_buffer buffer;
    co_await ws_.async_read (buffer, use_awaitable);
    auto msg = buffers_to_string (buffer.data ());
    std::cout << "number of letters '" << msg.size () << "' msg: '" << msg << "'" << std::endl;
    co_return msg;
}

awaitable<void>
Server::readFromClient (std::list<std::shared_ptr<User>>::iterator user, Websocket &connection)
{
    try
    {
        for (;;)
        {
            auto readResult = co_await my_read (connection);
            handleMessage (readResult, users, *user);
        }
    }
    catch (std::exception &e)
    {
        removeUser (user);
        std::cout << "read Exception: " << e.what () << std::endl;
    }
}

void
Server::removeUser (std::list<std::shared_ptr<User>>::iterator user)
{
    users.erase (user);
}
awaitable<void>
Server::listener ()
{
    auto executor = co_await this_coro::executor;
    tcp_acceptor acceptor (executor, _endpoint);
    for (;;)
    {
        try
        {
            auto socket = co_await acceptor.async_accept ();
            auto connection = std::make_shared<Websocket> (std::move (socket));
            users.emplace_back (std::make_shared<User> ());
            std::list<std::shared_ptr<User>>::iterator user = std::next (users.end (), -1);
            connection->set_option (websocket::stream_base::timeout::suggested (role_type::server));
            connection->set_option (websocket::stream_base::decorator ([] (websocket::response_type &res) { res.set (http::field::server, std::string (BOOST_BEAST_VERSION_STRING) + " websocket-server-async"); }));
            co_await connection->async_accept (use_awaitable);
            co_spawn (
                executor, [connection, this, &user] () mutable { return readFromClient (user, *connection); }, detached);
            co_spawn (
                executor, [connectionWeakPointer = std::weak_ptr<Websocket>{ connection }, &user] () mutable { return user->get ()->writeToClient (connectionWeakPointer); }, detached);
        }
        catch (std::exception &e)
        {
            std::cout << "Server::listener () connect Exception : " << e.what () << std::endl;
        }
    }
}
auto const DEFAULT_PORT = u_int16_t{ 55555 };

int
main ()
{
    try
    {
        using namespace boost::asio;
        io_context io_context (1);
        signal_set signals (io_context, SIGINT, SIGTERM);
        signals.async_wait ([&] (auto, auto) { io_context.stop (); });
        auto server = Server{ { ip::tcp::v4 (), DEFAULT_PORT } };
        co_spawn (
            io_context, [&server] { return server.listener (); }, detached);
        io_context.run ();
    }
    catch (std::exception &e)
    {
        std::printf ("Exception: %s\n", e.what ());
    }
    return 0;
}
EDIT: updated the code based on sehe's idea, which is marked as the answer.
The classical threading solution would be a condition variable. Of course, that's not what you want - I see you even explicitly disabled ASIO threading. Good.
One way - short of providing an Asio service to implement this behaviour - would be to use timers to emulate condition variables. You could use a timer that "never" expires (deadline at timepoint::max()) and manually reset it to timepoint::min() (canceling any pending async_wait) or to any time in the past to signify the condition. Then you can use Timer::async_wait with use_awaitable like you already know how.
Note that you still need to "manually" signal the change. This is what you want, because anything else would require kernel process-tracing support/hardware-debugger facilities, which require massive privileges and tend to be very slow.
You might want to know about associating the use_awaitable as the default completion token for the executor bound to your timer. See e.g. the examples: https://www.boost.org/doc/libs/1_78_0/doc/html/boost_asio/example/cpp17/coroutines_ts/echo_server_with_default.cpp (the HTML docs do NOT link these examples)
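For illustration, here is a minimal sketch of that idea as an awaitable message queue: a steady_timer whose expiry is "never", and a push() that cancels the pending async_wait to signal the consumer. The AwaitableQueue name is mine, and a single-threaded executor is assumed (so no extra synchronization):

#include <boost/asio.hpp>
#include <deque>
#include <string>

namespace asio = boost::asio;

// A timer standing in for a condition variable: cancel() is the "notify".
struct AwaitableQueue
{
    asio::steady_timer timer;
    std::deque<std::string> queue;

    explicit AwaitableQueue (asio::any_io_executor ex)
        : timer (ex, asio::steady_timer::time_point::max ()) // "never" expires
    {}

    void push (std::string msg)
    {
        queue.push_back (std::move (msg));
        timer.cancel (); // wakes up a pending async_wait
    }

    asio::awaitable<std::string> pop ()
    {
        while (queue.empty ())
        {
            boost::system::error_code ec;
            // operation_aborted is the expected "signal" here, not an error
            co_await timer.async_wait (asio::redirect_error (asio::use_awaitable, ec));
        }
        auto msg = std::move (queue.front ());
        queue.pop_front ();
        co_return msg;
    }
};

The writing coroutine would then co_await queue.pop() instead of sleeping 100 milliseconds between checks.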

What is the problem with generated SPIR-V code and how to verify it?

I have some generated SPIR-V code which I want to use with the Vulkan API, but I get an
Exception thrown at 0x00007FFB68D933CB (nvoglv64.dll) in vulkanCompute.exe: 0xC0000005: Access violation reading location 0x0000000000000008.
when trying to create the pipeline with vkCreateComputePipelines.
The API calls should be fine, because the same code works with a shader compiled with glslangValidator. Therefore I assume that the generated SPIR-V code must be ill-formed somehow.
I've checked the SPIR-V code with the validator tool from Khronos, using spirv-val --target-env vulkan1.1 mainV.spv, which exited without error. However, it is known that this tool is still incomplete.
I've also tried to compile my SPIR-V code with the Radeon GPU Analyzer, which is also available online at the shader playground, and this tool throws the error Error: Error: internal error: Bil::BilInstructionConvert::Create(60) Code Not Tested!, which is not really helpful but encourages the assumption that the code is malformed.
The SPIR-V code is unfortunately too long to post here, but it is available via the shader playground link.
Does anyone know what the problem is with my setup, or have any idea how I can verify my SPIR-V code in a better way, without checking all 700 lines of code manually?
I don't think the problem is there, but here is the C++ host code anyway:
#include "vulkan/vulkan.hpp"
#include <iostream>
#include <fstream>
#include <vector>
#define BAIL_ON_BAD_RESULT(result) \
if (VK_SUCCESS != (result)) \
{ \
fprintf(stderr, "Failure at %u %s\n", __LINE__, __FILE__); \
exit(-1); \
}
VkResult vkGetBestComputeQueueNPH(vk::PhysicalDevice &physicalDevice, uint32_t &queueFamilyIndex)
{
    auto properties = physicalDevice.getQueueFamilyProperties();
    int i = 0;
    for (auto prop : properties)
    {
        vk::QueueFlags maskedFlags = (~(vk::QueueFlagBits::eTransfer | vk::QueueFlagBits::eSparseBinding) & prop.queueFlags);
        if (!(vk::QueueFlagBits::eGraphics & maskedFlags) && (vk::QueueFlagBits::eCompute & maskedFlags))
        {
            queueFamilyIndex = i;
            return VK_SUCCESS;
        }
        i++;
    }
    i = 0;
    for (auto prop : properties)
    {
        vk::QueueFlags maskedFlags = (~(vk::QueueFlagBits::eTransfer | vk::QueueFlagBits::eSparseBinding) & prop.queueFlags);
        if (vk::QueueFlagBits::eCompute & maskedFlags)
        {
            queueFamilyIndex = i;
            return VK_SUCCESS;
        }
        i++;
    }
    return VK_ERROR_INITIALIZATION_FAILED;
}
int main(int argc, const char *const argv[])
{
    (void)argc;
    (void)argv;
    try
    {
        // initialize the vk::ApplicationInfo structure
        vk::ApplicationInfo applicationInfo("VecAdd", 1, "Vulkan.hpp", 1, VK_API_VERSION_1_1);

        // initialize the vk::InstanceCreateInfo
        std::vector<char *> layers = {
            "VK_LAYER_LUNARG_api_dump",
            "VK_LAYER_KHRONOS_validation"
        };
        vk::InstanceCreateInfo instanceCreateInfo({}, &applicationInfo, static_cast<uint32_t>(layers.size()), layers.data());

        // create a UniqueInstance
        vk::UniqueInstance instance = vk::createInstanceUnique(instanceCreateInfo);

        auto physicalDevices = instance->enumeratePhysicalDevices();
        for (auto &physicalDevice : physicalDevices)
        {
            auto props = physicalDevice.getProperties();

            // get the QueueFamilyProperties of the first PhysicalDevice
            std::vector<vk::QueueFamilyProperties> queueFamilyProperties = physicalDevice.getQueueFamilyProperties();
            uint32_t computeQueueFamilyIndex = 0;

            // get the best index into queueFamiliyProperties which supports compute and stuff
            BAIL_ON_BAD_RESULT(vkGetBestComputeQueueNPH(physicalDevice, computeQueueFamilyIndex));

            std::vector<char *> extensions = {"VK_EXT_external_memory_host", "VK_KHR_shader_float16_int8"};

            // create a UniqueDevice
            float queuePriority = 0.0f;
            vk::DeviceQueueCreateInfo deviceQueueCreateInfo(vk::DeviceQueueCreateFlags(), static_cast<uint32_t>(computeQueueFamilyIndex), 1, &queuePriority);
            vk::StructureChain<vk::DeviceCreateInfo, vk::PhysicalDeviceFeatures2, vk::PhysicalDeviceShaderFloat16Int8Features> createDeviceInfo = {
                vk::DeviceCreateInfo(vk::DeviceCreateFlags(), 1, &deviceQueueCreateInfo, 0, nullptr, static_cast<uint32_t>(extensions.size()), extensions.data()),
                vk::PhysicalDeviceFeatures2(),
                vk::PhysicalDeviceShaderFloat16Int8Features()
            };
            createDeviceInfo.get<vk::PhysicalDeviceFeatures2>().features.setShaderInt64(true);
            createDeviceInfo.get<vk::PhysicalDeviceShaderFloat16Int8Features>().setShaderInt8(true);
            vk::UniqueDevice device = physicalDevice.createDeviceUnique(createDeviceInfo.get<vk::DeviceCreateInfo>());

            auto memoryProperties2 = physicalDevice.getMemoryProperties2();
            vk::PhysicalDeviceMemoryProperties const &memoryProperties = memoryProperties2.memoryProperties;

            const int32_t bufferLength = 16384;
            const uint32_t bufferSize = sizeof(int32_t) * bufferLength;

            // we are going to need two buffers from this one memory
            const vk::DeviceSize memorySize = bufferSize * 3;

            // set memoryTypeIndex to an invalid entry in the properties.memoryTypes array
            uint32_t memoryTypeIndex = VK_MAX_MEMORY_TYPES;
            for (uint32_t k = 0; k < memoryProperties.memoryTypeCount; k++)
            {
                if ((vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent) & memoryProperties.memoryTypes[k].propertyFlags &&
                    (memorySize < memoryProperties.memoryHeaps[memoryProperties.memoryTypes[k].heapIndex].size))
                {
                    memoryTypeIndex = k;
                    std::cout << "found memory " << memoryTypeIndex + 1 << " out of " << memoryProperties.memoryTypeCount << std::endl;
                    break;
                }
            }
            BAIL_ON_BAD_RESULT(memoryTypeIndex == VK_MAX_MEMORY_TYPES ? VK_ERROR_OUT_OF_HOST_MEMORY : VK_SUCCESS);

            auto memory = device->allocateMemoryUnique(vk::MemoryAllocateInfo(memorySize, memoryTypeIndex));
            auto in_buffer = device->createBufferUnique(vk::BufferCreateInfo(vk::BufferCreateFlags(), bufferSize, vk::BufferUsageFlagBits::eStorageBuffer, vk::SharingMode::eExclusive));
            device->bindBufferMemory(in_buffer.get(), memory.get(), 0);

            // create a DescriptorSetLayout
            std::vector<vk::DescriptorSetLayoutBinding> descriptorSetLayoutBinding{
                {0, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute}};
            vk::UniqueDescriptorSetLayout descriptorSetLayout = device->createDescriptorSetLayoutUnique(vk::DescriptorSetLayoutCreateInfo(vk::DescriptorSetLayoutCreateFlags(), static_cast<uint32_t>(descriptorSetLayoutBinding.size()), descriptorSetLayoutBinding.data()));
            std::cout << "Memory bound" << std::endl;

            std::ifstream myfile;
            myfile.open("shaders/MainV.spv", std::ios::ate | std::ios::binary);
            if (!myfile.is_open())
            {
                std::cout << "File not found" << std::endl;
                return EXIT_FAILURE;
            }
            auto size = myfile.tellg();
            std::vector<unsigned int> shader_spv(size / sizeof(unsigned int));
            myfile.seekg(0);
            myfile.read(reinterpret_cast<char *>(shader_spv.data()), size);
            myfile.close();
            std::cout << "Shader size: " << shader_spv.size() << std::endl;

            auto shaderModule = device->createShaderModuleUnique(vk::ShaderModuleCreateInfo(vk::ShaderModuleCreateFlags(), shader_spv.size() * sizeof(unsigned int), shader_spv.data()));

            // create a PipelineLayout using that DescriptorSetLayout
            vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique(vk::PipelineLayoutCreateInfo(vk::PipelineLayoutCreateFlags(), 1, &descriptorSetLayout.get()));
            vk::ComputePipelineCreateInfo computePipelineInfo(
                vk::PipelineCreateFlags(),
                vk::PipelineShaderStageCreateInfo(
                    vk::PipelineShaderStageCreateFlags(),
                    vk::ShaderStageFlagBits::eCompute,
                    shaderModule.get(),
                    "_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE6VecAdd"),
                pipelineLayout.get());
            auto pipeline = device->createComputePipelineUnique(nullptr, computePipelineInfo);

            auto descriptorPoolSize = vk::DescriptorPoolSize(vk::DescriptorType::eStorageBuffer, 2);
            auto descriptorPool = device->createDescriptorPool(vk::DescriptorPoolCreateInfo(vk::DescriptorPoolCreateFlags(), 1, 1, &descriptorPoolSize));

            auto commandPool = device->createCommandPoolUnique(vk::CommandPoolCreateInfo(vk::CommandPoolCreateFlags(), computeQueueFamilyIndex));
            auto commandBuffer = std::move(device->allocateCommandBuffersUnique(vk::CommandBufferAllocateInfo(commandPool.get(), vk::CommandBufferLevel::ePrimary, 1)).front());

            commandBuffer->begin(vk::CommandBufferBeginInfo(vk::CommandBufferUsageFlags(vk::CommandBufferUsageFlagBits::eOneTimeSubmit)));
            commandBuffer->bindPipeline(vk::PipelineBindPoint::eCompute, pipeline.get());
            commandBuffer->dispatch(bufferSize / sizeof(int32_t), 1, 1);
            commandBuffer->end();

            auto queue = device->getQueue(computeQueueFamilyIndex, 0);
            vk::SubmitInfo submitInfo(0, nullptr, nullptr, 1, &commandBuffer.get(), 0, nullptr);
            queue.submit(1, &submitInfo, vk::Fence());
            queue.waitIdle();
            printf("all done\nWoohooo!!!\n\n");
        }
    }
    catch (vk::SystemError &err)
    {
        std::cout << "vk::SystemError: " << err.what() << std::endl;
        exit(-1);
    }
    catch (std::runtime_error &err)
    {
        std::cout << "std::runtime_error: " << err.what() << std::endl;
        exit(-1);
    }
    catch (...)
    {
        std::cout << "unknown error\n";
        exit(-1);
    }
    return EXIT_SUCCESS;
}
Well, after checking line by line it turned out that the problem occurs when working with pointers to pointers. For me it is still not clear from the specification that this is not allowed, but it is understandable that it does not work with logical pointers.
Still, the behaviour is strange: the validator is not able to detect this, and compiling the SPIR-V code crashes instead of producing a clear error message.
So in the end, it was the shader code that was wrong.

asio use_future instead of yield[ec]

I want to make a container of futures, where each future is the void result of a task, so that I can use wait_for_any on the container. Each task is a coroutine which I currently implement using yield_context; inside this coroutine there is an initiating function which returns an ec and a result, where I use the ec to analyze the result, and then another coroutine is called that is passed the same yield_context.
I want to know how to build this design.
And if I use use_future, how can I get the error code into an ec instead of having it thrown? If there is no way except throwing, I will put a try/catch around the async initiating functions.
All of these tasks will be posted/spawned on the asio io_service.
These are the main parts of my code.
This is how the task is spawned:
boost::asio::spawn(GetServiceReference(), boost::bind(&HTTPRequest::Execute, boost::placeholders::_1, m_HttpClient_request_name, Get_mHTTPClient_Responses_Map()));
And this is the coroutine using the yield_context:
void HTTPRequest::Execute(boost::asio::yield_context yield_r, std::string request_name, std::map<std::string, boost::shared_ptr<HTTPResponse>>& mHTTPClient_Responses_Map)
{
    resolver_iterator iterator_connect = boost::asio::async_connect(mSock, iterator_resolve, yield_r[ec]);
}
Inside Execute we use the ec to analyze the result:
if (ec == boost::system::errc::errc_t::success){}
And here we start another coroutine, passing the same yield_context:
SendRequest(yield_r);
}
I want to change this so that I have a container of futures for all spawned Execute calls. I do not care about the results of Execute, because I put them into the member class Response.
But I need a result in a future so that I can use wait_for_any on the container.
If you can change your implementation, use the async_result pattern.
This makes it so you can use your method with any of the approaches (completion handler, yield context or use_future).
I reproduce the self-contained example from here for inspiration:
Comprehensive Demo
Showing how to use it with:
coro's and yield[ec]
coro's and yield + exceptions
std::future
completion handlers
Live On Coliru
#define BOOST_COROUTINES_NO_DEPRECATION_WARNING
#include <iostream>
#include <thread>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/use_future.hpp>

using boost::system::error_code;
namespace asio = boost::asio;

template <typename Token>
auto async_meaning_of_life(bool success, Token&& token)
{
#if BOOST_VERSION >= 106600
    using result_type = typename asio::async_result<std::decay_t<Token>, void(error_code, int)>;
    typename result_type::completion_handler_type handler(std::forward<Token>(token));

    result_type result(handler);
#else
    typename asio::handler_type<Token, void(error_code, int)>::type
        handler(std::forward<Token>(token));

    asio::async_result<decltype (handler)> result (handler);
#endif

    if (success)
        handler(error_code{}, 42);
    else
        handler(asio::error::operation_aborted, 0);

    return result.get ();
}

void using_yield_ec(asio::yield_context yield) {
    for (bool success : { true, false }) {
        boost::system::error_code ec;
        auto answer = async_meaning_of_life(success, yield[ec]);
        std::cout << __FUNCTION__ << ": Result: " << ec.message() << "\n";
        std::cout << __FUNCTION__ << ": Answer: " << answer << "\n";
    }
}

void using_yield_catch(asio::yield_context yield) {
    for (bool success : { true, false })
    try {
        auto answer = async_meaning_of_life(success, yield);
        std::cout << __FUNCTION__ << ": Answer: " << answer << "\n";
    } catch(boost::system::system_error const& e) {
        std::cout << __FUNCTION__ << ": Caught: " << e.code().message() << "\n";
    }
}

void using_future() {
    for (bool success : { true, false })
    try {
        auto answer = async_meaning_of_life(success, asio::use_future);
        std::cout << __FUNCTION__ << ": Answer: " << answer.get() << "\n";
    } catch(boost::system::system_error const& e) {
        std::cout << __FUNCTION__ << ": Caught: " << e.code().message() << "\n";
    }
}

void using_handler() {
    for (bool success : { true, false })
        async_meaning_of_life(success, [](error_code ec, int answer) {
            std::cout << "using_handler: Result: " << ec.message() << "\n";
            std::cout << "using_handler: Answer: " << answer << "\n";
        });
}

int main() {
    asio::io_service svc;

    spawn(svc, using_yield_ec);
    spawn(svc, using_yield_catch);

    std::thread work([] {
        using_future();
        using_handler();
    });

    svc.run();
    work.join();
}
Prints:
using_yield_ec: Result: Success
using_yield_ec: Answer: 42
using_yield_ec: Result: Operation canceled
using_yield_ec: Answer: 0
using_future: Answer: 42
using_yield_catch: Answer: 42
using_yield_catch: Caught: Operation canceled
using_future: Caught: Operation canceled
using_handler: Result: Success
using_handler: Answer: 42
using_handler: Result: Operation canceled
using_handler: Answer: 0
Note: for simplicity I have not added output synchronization, so the output can become intermingled depending on runtime execution order
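To connect this back to the question's container-of-futures goal: with asio::use_future each initiation returns a std::future, which you can collect in a container. A sketch under the assumption that async_meaning_of_life from the demo above is in scope; note that std::future has no wait_for_any, so this simply waits on each future in turn:

#include <future>
#include <vector>

void using_future_container() {
    std::vector<std::future<int>> futures;
    for (bool success : { true, false })
        futures.push_back(async_meaning_of_life(success, asio::use_future));

    // std::future has no wait_for_any; wait on each one in turn instead
    for (auto& f : futures) {
        try {
            std::cout << "Answer: " << f.get() << "\n";
        } catch (boost::system::system_error const& e) {
            std::cout << "Caught: " << e.code().message() << "\n"; // errors arrive as exceptions
        }
    }
}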

DBus client and server in the same process

When I create a D-Bus server (via g_bus_own_name()) and the client to it (using g_dbus_proxy_new()) in the same process and then call g_dbus_proxy_call_sync(), it never returns. However, if server and client are in separate processes, everything is ok.
The following code illustrates my problem (I am using giomm C++ bindings here):
file main.cc:
#include <giomm.h>
#include <thread>

int server_main();
int client_main();

int main() {
    Gio::init();
    std::thread thr_server([](){ server_main(); });
    sleep(1); // give some time to server to register
    std::thread thr_client([](){ client_main(); });
    sleep(10); // wait for the client to finish
}
file server.cc:
#include <giomm.h>
#include <iostream>

namespace {
static Glib::RefPtr<Gio::DBus::NodeInfo> introspection_data;

static Glib::ustring introspection_xml =
    "<node name='/org/glibmm/DBusExample'>"
    "  <interface name='org.glibmm.DBusExample'>"
    "    <method name='Method'>"
    "    </method>"
    "  </interface>"
    "</node>";

guint registered_id = 0;
}

static void on_method_call(const Glib::RefPtr<Gio::DBus::Connection>& /* connection */,
    const Glib::ustring& /* sender */, const Glib::ustring& /* object_path */,
    const Glib::ustring& /* interface_name */, const Glib::ustring& method_name,
    const Glib::VariantContainerBase& parameters,
    const Glib::RefPtr<Gio::DBus::MethodInvocation>& invocation)
{
    if (method_name == "Method") {
        std::cout << "Method was called\n";
    }
}

const Gio::DBus::InterfaceVTable interface_vtable(sigc::ptr_fun(&on_method_call));

void on_bus_acquired(const Glib::RefPtr<Gio::DBus::Connection>& connection, const Glib::ustring& /* name */)
{
    std::cout << "on_bus_acquired\n";
    try {
        registered_id = connection->register_object("/org/glibmm/DBusExample",
                                                    introspection_data->lookup_interface(),
                                                    interface_vtable);
    }
    catch (const Glib::Error& ex) {
        std::cerr << "Registration of object failed." << std::endl;
    }
    return;
}

void on_name_acquired(const Glib::RefPtr<Gio::DBus::Connection>& /* connection */, const Glib::ustring& /* name */)
{}

void on_name_lost(const Glib::RefPtr<Gio::DBus::Connection>& connection, const Glib::ustring& /* name */) {
    connection->unregister_object(registered_id);
}

int server_main()
{
    try {
        introspection_data = Gio::DBus::NodeInfo::create_for_xml(introspection_xml);
    }
    catch (const Glib::Error& ex) {
        std::cerr << "Unable to create introspection data: " << ex.what() <<
            "." << std::endl;
        return 1;
    }

    const guint id = Gio::DBus::own_name(Gio::DBus::BUS_TYPE_SESSION,
                                         "org.glibmm.DBusExample",
                                         sigc::ptr_fun(&on_bus_acquired),
                                         sigc::ptr_fun(&on_name_acquired),
                                         sigc::ptr_fun(&on_name_lost));

    // Keep the service running
    auto loop = Glib::MainLoop::create();
    loop->run();

    Gio::DBus::unown_name(id);
    return EXIT_SUCCESS;
}
file client.cc:
#include <giomm.h>
#include <iostream>

Glib::RefPtr<Glib::MainLoop> loop;

// A main loop idle callback to quit when the main loop is idle.
bool on_main_loop_idle() {
    std::cout << "loop_idle\n";
    loop->quit();
    return false;
}

void on_dbus_proxy_available(Glib::RefPtr<Gio::AsyncResult>& result)
{
    auto proxy = Gio::DBus::Proxy::create_finish(result);
    if (!proxy) {
        std::cerr << "The proxy to the user's session bus was not successfully "
                     "created." << std::endl;
        loop->quit();
        return;
    }

    try {
        std::cout << "Calling...\n";
        proxy->call_sync("Method");
        std::cout << "It works!\n";
    }
    catch (const Glib::Error& error) {
        std::cerr << "Got an error: '" << error.what() << "'." << std::endl;
    }

    // Connect an idle callback to the main loop to quit when the main loop is
    // idle now that the method call is finished.
    Glib::signal_idle().connect(sigc::ptr_fun(&on_main_loop_idle));
}

int client_main() {
    loop = Glib::MainLoop::create();

    auto connection =
        Gio::DBus::Connection::get_sync(Gio::DBus::BUS_TYPE_SESSION);
    if (!connection) {
        std::cerr << "The user's session bus is not available." << std::endl;
        return 1;
    }

    // Create the proxy to the bus asynchronously.
    Gio::DBus::Proxy::create(connection, "org.glibmm.DBusExample",
                             "/org/glibmm/DBusExample", "org.glibmm.DBusExample",
                             sigc::ptr_fun(&on_dbus_proxy_available));

    loop->run();
    return EXIT_SUCCESS;
}
I compile the test with g++ -O2 -std=c++0x main.cc server.cc client.cc -o test $(pkg-config --cflags --libs giomm-2.4) and run:
./test
on_bus_acquired
Calling...
<it hangs>
However, when I change main.cc:
#include <giomm.h>

int server_main();
int client_main();

int main() {
    Gio::init();
    auto childid = fork();
    if (childid == 0) {
        server_main();
    } else {
        sleep(1);
        client_main();
    }
}
I get:
./test
on_bus_acquired
Calling...
Method was called
It works!
So call_sync() returns successfully.
I also tried removing the loops from the server and client and using a single-threaded main.cc:
#include <giomm.h>
#include <thread>

int server_main();
int client_main();

int main() {
    Gio::init();
    server_main();
    client_main();
    auto loop = Glib::MainLoop::create();
    loop->run();
}
Nothing helps. The question is: what am I doing wrong? I want to use my D-Bus server and client in one process.
I figured it out: the trick is to execute
Glib::VariantContainerBase result;
invocation->return_value(result);
at the end of on_method_call.
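For reference, this is what the fixed handler from server.cc looks like with that snippet merged in (a sketch; only the body of the if block changes):

static void on_method_call(const Glib::RefPtr<Gio::DBus::Connection>& /* connection */,
    const Glib::ustring& /* sender */, const Glib::ustring& /* object_path */,
    const Glib::ustring& /* interface_name */, const Glib::ustring& method_name,
    const Glib::VariantContainerBase& parameters,
    const Glib::RefPtr<Gio::DBus::MethodInvocation>& invocation)
{
    if (method_name == "Method") {
        std::cout << "Method was called\n";
        // Reply to the invocation; without this, the client's call_sync()
        // never returns when client and server live in the same process.
        Glib::VariantContainerBase result;
        invocation->return_value(result);
    }
}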