DirectX Application Causes Computer Crash

I have written a DirectX 11 application that runs on an Alienware laptop with a GeForce GT 555M graphics card, so I know the computer supports it.
I have come across this weird issue where my laptop will turn off without any warning if it isn't plugged into power; the application runs fine when it is plugged in.
I have tried to replicate this with other DirectX 11 applications, including the samples that come with DirectX, but my application is the only one that does it, even though I use very similar code to the samples. I must be missing something, but I don't know what.
This is my code to set up the SwapChain and the RenderTargetView:
ZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
swapChainDesc.BufferCount = 1;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
if(vsync) {
swapChainDesc.BufferDesc.RefreshRate.Numerator = numerator;
swapChainDesc.BufferDesc.RefreshRate.Denominator = denominator;
}
else {
swapChainDesc.BufferDesc.RefreshRate.Numerator = 0;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
}
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.OutputWindow = hwnd;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
if(fullscreen) {
swapChainDesc.Windowed = false;
}
else {
swapChainDesc.Windowed = true;
}
swapChainDesc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
swapChainDesc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swapChainDesc.Flags = 0;
D3D_FEATURE_LEVEL featureLevels[4] = { D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0, D3D_FEATURE_LEVEL_9_3, };
if (FAILED(D3D11CreateDeviceAndSwapChain(0, D3D_DRIVER_TYPE_HARDWARE, 0, 0, featureLevels, 4, D3D11_SDK_VERSION, &swapChainDesc, &swapChain, &device, &featureLevel, &deviceContext))) {
if (!device) {
printf("Insanity Error: Failed To Create Direct3D Device\n");
}
if (!swapChain) {
printf("Insanity Error: Failed To Create Swap Chain\n");
}
return false;
}
else {
switch (featureLevel) {
case D3D_FEATURE_LEVEL_11_0:
printf("Insanity Info: Currently Using DirectX 11.0\n");
break;
case D3D_FEATURE_LEVEL_10_1:
printf("Insanity Info: Currently Using DirectX 10.1\n");
break;
case D3D_FEATURE_LEVEL_10_0:
printf("Insanity Info: Currently Using DirectX 10.0\n");
break;
case D3D_FEATURE_LEVEL_9_3:
printf("Insanity Info: Currently Using DirectX 9.3\n");
break;
default:
printf("Insanity Info: Graphics System Unknown\n");
break;
}
}
if (FAILED(swapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&backBufferPtr))) {
printf("Insanity Error: Failed To Find Back Buffer\n");
return false;
}
if (FAILED(device->CreateRenderTargetView(backBufferPtr, 0, &renderTargetView))) {
printf("Insanity Error: Failed To Create Render Target View\n");
return false;
}
backBufferPtr->Release();
backBufferPtr = 0;
And then this is my code to set up the DepthStencilState and the RasterState:
ZeroMemory(&depthStencilDesc, sizeof(D3D11_DEPTH_STENCIL_DESC));
depthStencilDesc.DepthEnable = false;
depthStencilDesc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilDesc.DepthFunc = D3D11_COMPARISON_LESS;
depthStencilDesc.StencilEnable = true;
depthStencilDesc.StencilReadMask = 0xFF;
depthStencilDesc.StencilWriteMask = 0xFF;
depthStencilDesc.FrontFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilDesc.FrontFace.StencilDepthFailOp = D3D11_STENCIL_OP_INCR;
depthStencilDesc.FrontFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilDesc.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
depthStencilDesc.BackFace.StencilFailOp = D3D11_STENCIL_OP_KEEP;
depthStencilDesc.BackFace.StencilDepthFailOp = D3D11_STENCIL_OP_DECR;
depthStencilDesc.BackFace.StencilPassOp = D3D11_STENCIL_OP_KEEP;
depthStencilDesc.BackFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
if (FAILED(device->CreateDepthStencilState(&depthStencilDesc, &depthDisableStencilState))) {
printf("Insanity Error: Failed To Create Depth State\n");
return false;
}
deviceContext->OMSetDepthStencilState(depthEnableStencilState, 1);
this->enableDepthTesting = true;
ZeroMemory(&depthStencilViewDesc, sizeof(depthStencilViewDesc));
depthStencilViewDesc.Format = depthBufferDesc.Format;
depthStencilViewDesc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDesc.Texture2D.MipSlice = 0;
if (FAILED(device->CreateDepthStencilView(depthStencilBuffer, &depthStencilViewDesc, &depthStencilView))) {
printf("Insanity Error: Failed To Create Depth View\n");
return false;
}
rasterDesc.AntialiasedLineEnable = false;
rasterDesc.CullMode = D3D11_CULL_NONE;
rasterDesc.DepthBias = 0;
rasterDesc.DepthBiasClamp = 0.0f;
rasterDesc.DepthClipEnable = true;
rasterDesc.FillMode = D3D11_FILL_SOLID;
rasterDesc.FrontCounterClockwise = false;
rasterDesc.MultisampleEnable = false;
rasterDesc.ScissorEnable = true;
rasterDesc.SlopeScaledDepthBias = 0.0f;
if (FAILED(device->CreateRasterizerState(&rasterDesc, &rasterState))) {
printf("Insanity Error: Failed To Create Rasterizer State\n");
return false;
}
deviceContext->RSSetState(rasterState);
If anyone has any ideas as to what could be going on, please help!

Try using the reference device instead of your hardware device and see if the problem recurs. This should help isolate the driver from the code. When you create your device, instead use D3D_DRIVER_TYPE_REFERENCE.
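For illustration, a minimal sketch of that change, reusing the variables from the question (the error string is just an example):
// Same call as in the question, but with the software reference rasterizer
// instead of the GPU driver, to take the driver out of the equation.
HRESULT hr = D3D11CreateDeviceAndSwapChain(
    0, D3D_DRIVER_TYPE_REFERENCE, 0, 0,
    featureLevels, 4, D3D11_SDK_VERSION,
    &swapChainDesc, &swapChain, &device, &featureLevel, &deviceContext);
if (FAILED(hr)) {
    printf("Insanity Error: Failed To Create Reference Device\n");
}
It will be slow, but if the laptop no longer powers off on battery with the reference device, the problem is more likely in the driver or power management than in your code.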

Related

How to render a Vulkan scene to an ImGui window

https://github.com/ocornut/imgui/issues/5110
Version: 1.87
Branch: docking
Back-end/Renderer/Compiler/OS
Back-ends: imgui_impl_glfw.cpp + imgui_impl_vulkan.cpp
Operating System: macOS
My Issue/Question:
How to render a Vulkan scene to an ImGui window?
I want to achieve this:
[screenshot]
I have read various documents and #914, but I'm still at a loss.
Maybe I'm close to the answer?
Here is my project code, built quickly via CMake. It is implemented with reference to the Vulkan tutorial.
I would also appreciate any help, thanks a lot.
The key code is here:
m_Dset = ImGui_ImplVulkan_AddTexture(m_TextureSampler, m_TextureImageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
...
ImGui::Begin("Viewport");
ImVec2 viewportPanelSize = ImGui::GetContentRegionAvail();
ImGui::Image(m_Dset, ImVec2{viewportPanelSize.x, viewportPanelSize.y});
ImGui::End();
I tried several ways:
1. According to this code I got this:
m_Dset = ImGui_ImplVulkan_AddTexture(m_TextureSampler, m_TextureImageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
...
ImGui::Begin("Viewport");
ImVec2 viewportPanelSize = ImGui::GetContentRegionAvail();
ImGui::Image(m_Dset, ImVec2{viewportPanelSize.x, viewportPanelSize.y});
ImGui::End();
[screenshot]
2. According to this code I got this:
m_Dset = ImGui_ImplVulkan_AddTexture(m_TextureSampler, m_SwapChainImageViews[currentFrame], VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// std::vector<VkImageView> m_SwapChainImageViews;
...
ImGui::Begin("Viewport");
ImVec2 viewportPanelSize = ImGui::GetContentRegionAvail();
ImGui::Image(m_Dset, ImVec2{viewportPanelSize.x, viewportPanelSize.y});
ImGui::End();
[screenshot]
But when I drag the ImGui window to the main window:
[screenshot]
3. Last attempt:
// remove m_Dset = ImGui_ImplVulkan_AddTexture(...);
...
ImGui::Begin("Viewport");
ImVec2 viewportPanelSize = ImGui::GetContentRegionAvail();
ImGui::Image(m_DescriptorSets[currentFrame], ImVec2{viewportPanelSize.x, viewportPanelSize.y});
ImGui::End();
I got this:
[screenshot]
Hope you understand what I mean... Any help would be greatly appreciated. It can be quickly reproduced with the project provided above, if you have the time.
I've been stuck for days.
ps: I'm working tirelessly on problems I've encountered while learning Vulkan and ImGui. I have a cold because of it. 😭😭😭😭😭
I must be low IQ.
I have these variables:
private:
GLFWwindow *m_Window;
VkInstance m_Instance;
VkDebugUtilsMessengerEXT m_DebugMessenger;
VkPhysicalDevice m_PhysicalDevice = VK_NULL_HANDLE;
VkDevice m_Device;
VkQueue m_GraphicsQueue;
VkQueue m_PresentQueue;
VkSurfaceKHR m_Surface;
VkSwapchainKHR m_SwapChain;
std::vector<VkImage> m_SwapChainImages;
VkFormat m_SwapChainImageFormat;
VkExtent2D m_SwapChainExtent;
std::vector<VkImageView> m_SwapChainImageViews;
VkRenderPass m_RenderPass;
VkDescriptorSetLayout m_DescriptorSetLayout;
VkPipelineLayout m_PipelineLayout;
VkPipeline m_GraphicsPipeline;
std::vector<VkFramebuffer> m_SwapChainFramebuffers;
VkCommandPool m_CommandPool;
// for imgui
VkDescriptorPool m_ImGuiDescriptorPool;
VkRenderPass m_ImGuiRenderPass;
std::vector<VkFramebuffer> m_ImGuiFramebuffers;
VkCommandPool m_ImGuiCommandPool;
std::vector<VkCommandBuffer> m_ImGuiCommandBuffers;
VkImage m_TextureImage;
VkDeviceMemory m_TextureImageMemory;
VkImageView m_TextureImageView;
VkSampler m_TextureSampler;
VkImage m_DepthImage;
VkDeviceMemory m_DepthImageMemory;
VkImageView m_DepthImageView;
VkBuffer m_VertexBuffer;
VkDeviceMemory m_VertexBufferMemory;
VkBuffer m_IndexBuffer;
VkDeviceMemory m_IndexBufferMemory;
// UniformBuffer
std::vector<VkBuffer> m_UniformBuffers;
std::vector<VkDeviceMemory> m_UniformBuffersMemory;
VkDescriptorPool m_DescriptorPool;
std::vector<VkDescriptorSet> m_DescriptorSets;
std::vector<VkCommandBuffer> m_CommandBuffers;
std::vector<VkSemaphore> m_ImageAvailableSemaphores;
std::vector<VkSemaphore> m_RenderFinishedSemaphores;
std::vector<VkFence> m_InFlightFences;
QueueFamilyIndices m_QueueFamilyIndices;
uint32_t currentFrame = 0;
uint32_t m_ImageCount = 2;
VkDescriptorSet m_Dset;
void initVulkan()
{
createInstance();
setupDebugMessenger();
createSurface();
pickPhysicalDevice();
createLogicalDevice();
createSwapChain();
createImageViews();
createRenderPass();
createDescriptorSetLayout();
createGraphicsPipeline();
createCommandPool(&m_CommandPool);
createDepthResources();
createFramebuffers();
createTextureImage();
createTextureImageView();
createTextureSampler();
createVertexBuffer();
createIndexBuffer();
createUniformBuffers();
createDescriptorPool();
createDescriptorSets();
createCommandBuffers();
createSyncObjects();
IMGUI_CHECKVERSION();
ImGui::CreateContext();
ImGuiIO &io = ImGui::GetIO();
(void)io;
io.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard;
io.ConfigFlags |= ImGuiConfigFlags_DockingEnable;
io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable;
ImGui::StyleColorsDark();
// When viewports are enabled we tweak WindowRounding/WindowBg so platform windows can look identical to regular ones.
ImGuiStyle &style = ImGui::GetStyle();
if (io.ConfigFlags & ImGuiConfigFlags_ViewportsEnable)
{
style.WindowRounding = 0.0f;
style.Colors[ImGuiCol_WindowBg].w = 1.0f;
}
{
VkDescriptorPoolSize pool_sizes[] =
{
{VK_DESCRIPTOR_TYPE_SAMPLER, 1000},
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1000},
{VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1000},
{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1000},
{VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1000},
{VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1000},
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000},
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1000},
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1000},
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1000},
{VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1000}};
VkDescriptorPoolCreateInfo pool_info = {};
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
pool_info.maxSets = 1000 * IM_ARRAYSIZE(pool_sizes);
pool_info.poolSizeCount = (uint32_t)IM_ARRAYSIZE(pool_sizes);
pool_info.pPoolSizes = pool_sizes;
if (vkCreateDescriptorPool(m_Device, &pool_info, nullptr, &m_ImGuiDescriptorPool) != VK_SUCCESS)
throw std::runtime_error("Create DescriptorPool for m_ImGuiDescriptorPool failed!");
}
// Create RenderPass for m_ImGuiRenderPass
{
createImGuiRenderPass();
}
// Create CommandPool for m_ImGuiCommandPool
{
createCommandPool(&m_ImGuiCommandPool);
}
// Create CommandBuffers for m_ImGuiCommandBuffers
{
createImGuiCommandBuffers();
}
{
createImGuiFramebuffers();
}
ImGui_ImplGlfw_InitForVulkan(m_Window, true);
ImGui_ImplVulkan_InitInfo init_info = {};
init_info.Instance = m_Instance;
init_info.PhysicalDevice = m_PhysicalDevice;
init_info.Device = m_Device;
init_info.QueueFamily = m_QueueFamilyIndices.graphicsFamily.value();
init_info.Queue = m_GraphicsQueue;
init_info.PipelineCache = VK_NULL_HANDLE;
init_info.DescriptorPool = m_ImGuiDescriptorPool;
init_info.Subpass = 0;
init_info.MinImageCount = m_ImageCount;
init_info.ImageCount = m_ImageCount;
init_info.MSAASamples = VK_SAMPLE_COUNT_1_BIT;
init_info.Allocator = nullptr;
init_info.CheckVkResultFn = nullptr;
ImGui_ImplVulkan_Init(&init_info, m_ImGuiRenderPass);
// Upload Fonts
{
VkCommandBuffer commandBuffer = beginSingleTimeCommands(m_ImGuiCommandPool);
ImGui_ImplVulkan_CreateFontsTexture(commandBuffer);
endSingleTimeCommands(commandBuffer, m_ImGuiCommandPool);
ImGui_ImplVulkan_DestroyFontUploadObjects();
}
// m_Dset = ImGui_ImplVulkan_AddTexture(m_TextureSampler, m_TextureImageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
void drawFrame()
{
vkWaitForFences(m_Device, 1, &m_InFlightFences[currentFrame], VK_TRUE, UINT64_MAX);
uint32_t imageIndex;
VkResult result = vkAcquireNextImageKHR(m_Device, m_SwapChain, UINT64_MAX, m_ImageAvailableSemaphores[currentFrame], VK_NULL_HANDLE, &imageIndex);
if (result == VK_ERROR_OUT_OF_DATE_KHR)
{
recreateSwapChain();
return;
}
else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
{
throw std::runtime_error("failed to acquire swap chain image!");
}
// Only reset the fence if we are submitting work
vkResetFences(m_Device, 1, &m_InFlightFences[currentFrame]);
// vkResetCommandBuffer(m_CommandBuffers[currentFrame], 0);
recordCommandBuffer(m_CommandBuffers[currentFrame], imageIndex, m_DescriptorSets[currentFrame]);
{
// vkResetCommandPool(m_Device, m_ImGuiCommandPool, 0);
VkCommandBufferBeginInfo info = {};
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.flags |= VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(m_ImGuiCommandBuffers[currentFrame], &info);
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = m_ImGuiRenderPass;
renderPassInfo.framebuffer = m_ImGuiFramebuffers[imageIndex];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = m_SwapChainExtent;
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(m_ImGuiCommandBuffers[currentFrame], &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
// Record dear imgui primitives into command buffer
ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), m_ImGuiCommandBuffers[currentFrame]);
vkCmdEndRenderPass(m_ImGuiCommandBuffers[currentFrame]);
vkEndCommandBuffer(m_ImGuiCommandBuffers[currentFrame]);
}
updateUniformBuffer(currentFrame);
VkSubmitInfo submitInfo{};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
std::array<VkCommandBuffer, 2> submitCommandBuffers =
{m_CommandBuffers[currentFrame], m_ImGuiCommandBuffers[currentFrame]};
VkSemaphore waitSemaphores[] = {m_ImageAvailableSemaphores[currentFrame]};
VkPipelineStageFlags waitStages[] = {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = static_cast<uint32_t>(submitCommandBuffers.size());
submitInfo.pCommandBuffers = submitCommandBuffers.data();
VkSemaphore signalSemaphores[] = {m_RenderFinishedSemaphores[currentFrame]};
submitInfo.signalSemaphoreCount = 1;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(m_GraphicsQueue, 1, &submitInfo, m_InFlightFences[currentFrame]) != VK_SUCCESS)
{
throw std::runtime_error("failed to submit draw command buffer!");
}
VkPresentInfoKHR presentInfo{};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
VkSwapchainKHR swapChains[] = {m_SwapChain};
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &imageIndex;
presentInfo.pResults = nullptr; // Optional
result = vkQueuePresentKHR(m_PresentQueue, &presentInfo);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || m_FramebufferResized)
{
m_FramebufferResized = false;
recreateSwapChain();
}
else if (result != VK_SUCCESS)
{
throw std::runtime_error("failed to present swap chain image!");
}
currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT;
}
void recordCommandBuffer(const VkCommandBuffer &commandBuffer, const uint32_t &imageIndex, const VkDescriptorSet &descriptorSet)
{
VkCommandBufferBeginInfo beginInfo{};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
// beginInfo.flags = 0; // Optional
// beginInfo.pInheritanceInfo = nullptr; // Optional
if (vkBeginCommandBuffer(commandBuffer, &beginInfo) != VK_SUCCESS)
{
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo{};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = m_RenderPass;
renderPassInfo.framebuffer = m_SwapChainFramebuffers[imageIndex];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = m_SwapChainExtent;
std::array<VkClearValue, 2> clearValues{};
clearValues[0].color = {{0.0f, 0.0f, 0.0f, 1.0f}};
clearValues[1].depthStencil = {1.0f, 0};
renderPassInfo.clearValueCount = static_cast<uint32_t>(clearValues.size());
renderPassInfo.pClearValues = clearValues.data();
vkCmdBeginRenderPass(commandBuffer, &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_GraphicsPipeline);
VkBuffer vertexBuffers[] = {m_VertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffer, 0, 1, vertexBuffers, offsets);
vkCmdBindIndexBuffer(commandBuffer, m_IndexBuffer, 0, VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_PipelineLayout, 0, 1, &descriptorSet, 0, nullptr);
vkCmdDrawIndexed(commandBuffer, static_cast<uint32_t>(indices.size()), 1, 0, 0, 0);
vkCmdEndRenderPass(commandBuffer);
if (vkEndCommandBuffer(commandBuffer) != VK_SUCCESS)
{
throw std::runtime_error("failed to record command buffer!");
}
}
For me the image is just not rendering in ImGui::Image and I don't know why. I can see my image in the shader inputs, but in the shader outputs I just get black.
I guess in your case you should render your scene to another image (not a swapchain image), which means you need another render pass for it. Use the swapchain images only for the imgui output.
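A rough sketch of that approach, assuming the createImage/createImageView helpers from the Vulkan tutorial this project follows (the m_Offscreen* names are hypothetical, not from the original code):
// 1) Create a color image that can be both rendered to and sampled by ImGui.
createImage(m_SwapChainExtent.width, m_SwapChainExtent.height,
            VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_TILING_OPTIMAL,
            VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
            VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
            m_OffscreenImage, m_OffscreenImageMemory);
m_OffscreenImageView = createImageView(m_OffscreenImage, VK_FORMAT_B8G8R8A8_UNORM,
                                       VK_IMAGE_ASPECT_COLOR_BIT);
// 2) Give the scene its own render pass and framebuffer targeting this image,
//    with the color attachment's finalLayout set to
//    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so ImGui can sample it afterwards.
// 3) Register it once with the ImGui Vulkan backend.
m_Dset = ImGui_ImplVulkan_AddTexture(m_TextureSampler, m_OffscreenImageView,
                                     VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// 4) Each frame: record the scene pass into the offscreen framebuffer, then the
//    ImGui pass into the swapchain framebuffer, and draw the viewport with
//    ImGui::Image(m_Dset, viewportPanelSize).
On a window resize the offscreen image would need to be recreated along with the swapchain.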
I think that in createSwapChain() we should set the surface format like this to avoid a validation error:
VkSurfaceFormatKHR surfaceFormat;
surfaceFormat.format = VK_FORMAT_B8G8R8A8_UNORM;
surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;

Cannot create logical device only in debug mode

I'm getting VK_ERROR_FEATURE_NOT_PRESENT (-8).
But I'm using vkGetPhysicalDeviceFeatures to query the supported features.
My Code:
std::vector<VkDeviceQueueCreateInfo> LogicalDevice::CreateDeviceQueueCreateInfos(QueueFamilies queueFamilies)
{
std::vector uniqueQueueFamilies = queueFamilies.GetUniqueQueueFamilies();
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
for (auto queueFamily : uniqueQueueFamilies)
{
const int countOfQueues = queueFamily.CountOfQueues;
std::vector<float> queuePriorities(countOfQueues);
for (int indexOfPriority = 0; indexOfPriority < countOfQueues; indexOfPriority++)
{
queuePriorities[indexOfPriority] = 1.0f - ( (float) indexOfPriority / countOfQueues);
}
VkDeviceQueueCreateInfo queueCreateInfo{};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = queueFamily.Index.value();
queueCreateInfo.queueCount = queueFamily.CountOfQueues;
queueCreateInfo.flags = queueFamily.Flags;
queueCreateInfo.pQueuePriorities = queuePriorities.data();
queueCreateInfos.push_back(queueCreateInfo);
}
return queueCreateInfos;
}
VkDeviceCreateInfo LogicalDevice::GetDeviceCreateInfo(std::vector<VkDeviceQueueCreateInfo> deviceQueueCreateInfos, VkPhysicalDevice physicalDevice)
{
VkPhysicalDeviceFeatures deviceFeatures{};
vkGetPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);
VkDeviceCreateInfo deviceCreateInfo{};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceCreateInfo.queueCreateInfoCount = static_cast<uint32_t>(deviceQueueCreateInfos.size());
deviceCreateInfo.pQueueCreateInfos = deviceQueueCreateInfos.data();
deviceCreateInfo.pEnabledFeatures = &deviceFeatures;
return deviceCreateInfo;
}
void LogicalDevice::Initialize(VkPhysicalDevice physicalDevice, VkSurfaceKHR surfaceForPickingPhysicalDevice)
{
m_queueFamilies = QueueFamilies::GetQueueFamilies(physicalDevice, surfaceForPickingPhysicalDevice);
std::vector<VkDeviceQueueCreateInfo> deviceQueueCreateInfos = CreateDeviceQueueCreateInfos(m_queueFamilies);
VkDeviceCreateInfo deviceCreateInfo = GetDeviceCreateInfo(deviceQueueCreateInfos, physicalDevice);
VkResult result = vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &m_vulkanDevice);
if (result != VK_SUCCESS)
{
throw new std::runtime_error("Cannot create logical device.");
}
}
The deviceFeatures variable that you read the features into, and which the create info structure points at, is local to GetDeviceCreateInfo. It is out of scope by the time you call vkCreateDevice, which results in undefined behavior. You're probably getting random junk at device creation time instead, which causes that error.
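A minimal sketch of one way to fix it, keeping everything the create info points at alive until after vkCreateDevice by building it directly in Initialize (the rest of the code is unchanged from the question):
void LogicalDevice::Initialize(VkPhysicalDevice physicalDevice, VkSurfaceKHR surfaceForPickingPhysicalDevice)
{
    m_queueFamilies = QueueFamilies::GetQueueFamilies(physicalDevice, surfaceForPickingPhysicalDevice);
    std::vector<VkDeviceQueueCreateInfo> deviceQueueCreateInfos = CreateDeviceQueueCreateInfos(m_queueFamilies);

    // Lives on this stack frame, so the pointer stays valid through vkCreateDevice.
    VkPhysicalDeviceFeatures deviceFeatures{};
    vkGetPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);

    VkDeviceCreateInfo deviceCreateInfo{};
    deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    deviceCreateInfo.queueCreateInfoCount = static_cast<uint32_t>(deviceQueueCreateInfos.size());
    deviceCreateInfo.pQueueCreateInfos = deviceQueueCreateInfos.data();
    deviceCreateInfo.pEnabledFeatures = &deviceFeatures;

    if (vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &m_vulkanDevice) != VK_SUCCESS)
    {
        throw std::runtime_error("Cannot create logical device.");
    }
}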

Record RTSP stream with FFmpeg in iOS

I've followed iFrameExtractor to successfully stream RTSP in my Swift project. The project also has a recording function; it basically uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an mp4 file.
When I run this project on my device, the RTSP streaming works fine, but the recording function always generates a blank mp4 file with no image and no sound.
Could anyone tell me what step I'm missing?
I'm using an iPhone 5 with iOS 9.1 and Xcode 7.1.1.
The FFmpeg version is 2.8.3, compiled following the CompilationGuide – FFmpeg instructions.
Following is the sample code in this project.
The function that generates each frame:
-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;
while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// 20130525 albert.liao modified start
// Initialize a new format context for writing file
if(veVideoRecordState!=eH264RecIdle)
{
switch(veVideoRecordState)
{
case eH264RecInit:
{
if ( !pFormatCtx_Record )
{
int bFlag = 0;
//NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";
const char *file = [videoPath UTF8String];
pFormatCtx_Record = avformat_alloc_context();
bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );
if(bFlag==true)
{
veVideoRecordState = eH264RecActive;
fprintf(stderr, "h264_file_create success\n");
}
else
{
veVideoRecordState = eH264RecIdle;
fprintf(stderr, "h264_file_create error\n");
}
}
}
//break;
case eH264RecActive:
{
if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
{
bFirstIFrame=true;
vPTS = packet.pts ;
vDTS = packet.dts ;
#if 0
NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
[pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
[NSTimer scheduledTimerWithTimeInterval:5.0//2.0
target:self
selector:@selector(StopRecording:)
userInfo:nil
repeats:NO];
#endif
}
// Record audio when 1st i-Frame is obtained
if(bFirstIFrame==true)
{
if ( pFormatCtx_Record )
{
#if PTS_DTS_IS_CORRECT==1
packet.pts = packet.pts - vPTS;
packet.dts = packet.dts - vDTS;
#endif
h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(@"pFormatCtx_Record no exist");
}
}
}
break;
case eH264RecClose:
{
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
#if 0
// 20130607 Test
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
{
ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
{
[library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
if (error) {
// TODO: error handling
NSLog(@"writeVideoAtPathToSavedPhotosAlbum error");
} else {
// TODO: success handling
NSLog(@"writeVideoAtPathToSavedPhotosAlbum success");
}
}];
}
[library release];
});
#endif
vPTS = 0;
vDTS = 0;
vAudioPTS = 0;
vAudioDTS = 0;
pFormatCtx_Record = NULL;
NSLog(@"h264_file_close() is finished");
}
else
{
NSLog(@"fc no exist");
}
bFirstIFrame = false;
veVideoRecordState = eH264RecIdle;
}
break;
default:
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
pFormatCtx_Record = NULL;
}
NSLog(@"[ERROR] unexpected veVideoRecordState!!");
veVideoRecordState = eH264RecIdle;
break;
}
}
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
else if(packet.stream_index==audioStream)
{
// 20131024 albert.liao modfied start
static int vPktCount=0;
BOOL bIsAACADTS = FALSE;
int ret = 0;
if(aPlayer.vAACType == eAAC_UNDEFINED)
{
tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
}
@synchronized(aPlayer)
{
if(aPlayer==nil)
{
aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
NSLog(@"aPlayer initAudio");
if(bIsAACADTS)
{
aPlayer.vAACType = eAAC_ADTS;
//NSLog(@"is ADTS AAC");
}
}
else
{
if(vPktCount<5) // The voice is listened once image is rendered
{
vPktCount++;
}
else
{
if([aPlayer getStatus]!=eAudioRunning)
{
dispatch_async(dispatch_get_main_queue(), ^(void) {
@synchronized(aPlayer)
{
NSLog(@"aPlayer start play");
[aPlayer Play];
}
});
}
}
}
};
@synchronized(aPlayer)
{
int ret = 0;
ret = [aPlayer putAVPacket:&packet];
if(ret <= 0)
NSLog(@"Put Audio Packet Error!!");
}
// 20131024 albert.liao modfied end
if(bFirstIFrame==true)
{
switch(veVideoRecordState)
{
case eH264RecActive:
{
if ( pFormatCtx_Record )
{
h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(@"pFormatCtx_Record no exist");
}
}
}
}
}
else
{
//fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",\
packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
}
// 20130525 albert.liao modified end
}
return frameFinished!=0;
}
avformat_write_header:
int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx,AVCodecContext *pAudioCodecCtx, double fps, void *p, int len )
{
int vRet=0;
AVOutputFormat *of=NULL;
AVStream *pst=NULL;
AVCodecContext *pcc=NULL, *pAudioOutputCodecContext=NULL;
avcodec_register_all();
av_register_all();
av_log_set_level(AV_LOG_VERBOSE);
if(!pFilePath)
{
fprintf(stderr, "FilePath no exist");
return -1;
}
if(!fc)
{
fprintf(stderr, "AVFormatContext no exist");
return -1;
}
fprintf(stderr, "file=%s\n",pFilePath);
// Create container
of = av_guess_format( 0, pFilePath, 0 );
fc->oformat = of;
strcpy( fc->filename, pFilePath );
// Add video stream
pst = avformat_new_stream( fc, 0 );
vVideoStreamIdx = pst->index;
fprintf(stderr,"Video Stream:%d",vVideoStreamIdx);
pcc = pst->codec;
avcodec_get_context_defaults3( pcc, AVMEDIA_TYPE_VIDEO );
// Save the stream as origin setting without convert
pcc->codec_type = pCodecCtx->codec_type;
pcc->codec_id = pCodecCtx->codec_id;
pcc->bit_rate = pCodecCtx->bit_rate;
pcc->width = pCodecCtx->width;
pcc->height = pCodecCtx->height;
if(fps==0)
{
double fps=0.0;
AVRational pTimeBase;
pTimeBase.num = pCodecCtx->time_base.num;
pTimeBase.den = pCodecCtx->time_base.den;
fps = 1.0/ av_q2d(pCodecCtx->time_base)/ FFMAX(pCodecCtx->ticks_per_frame, 1);
fprintf(stderr,"fps_method(tbc): 1/av_q2d()=%g",fps);
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
else
{
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
// reference ffmpeg\libavformat\utils.c
// For SPS and PPS in avcC container
pcc->extradata = malloc(sizeof(uint8_t)*pCodecCtx->extradata_size);
memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
pcc->extradata_size = pCodecCtx->extradata_size;
// For Audio stream
if(pAudioCodecCtx)
{
AVCodec *pAudioCodec=NULL;
AVStream *pst2=NULL;
pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
// Add audio stream
pst2 = avformat_new_stream( fc, pAudioCodec );
vAudioStreamIdx = pst2->index;
pAudioOutputCodecContext = pst2->codec;
avcodec_get_context_defaults3( pAudioOutputCodecContext, pAudioCodec );
fprintf(stderr,"Audio Stream:%d",vAudioStreamIdx);
fprintf(stderr,"pAudioCodecCtx->bits_per_coded_sample=%d",pAudioCodecCtx->bits_per_coded_sample);
pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;
// Copy the codec attributes
pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
pAudioOutputCodecContext->bit_rate = 12000;//pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
//FF_PROFILE_AAC_LOW;
// pAudioCodecCtx->bit_rate;
// AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;//pAudioCodecCtx->sample_fmt;
pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;
pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;
pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
pAudioOutputCodecContext->frame_size = 1024;
fprintf(stderr,"profile:%d, sample_rate:%d, channles:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
fprintf(stderr, "\ncould not open codec\n");
}
av_dict_free(&opts);
#if 0
// For Audio, this part is no need
if(pAudioCodecCtx->extradata_size!=0)
{
NSLog(@"extradata_size !=0");
pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t)*pAudioCodecCtx->extradata_size);
memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
}
else
{
// For WMA test only
pAudioOutputCodecContext->extradata_size = 0;
NSLog(@"extradata_size ==0");
}
#endif
}
if(fc->oformat->flags & AVFMT_GLOBALHEADER)
{
pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if ( !( fc->oformat->flags & AVFMT_NOFILE ) )
{
vRet = avio_open( &fc->pb, fc->filename, AVIO_FLAG_WRITE );
if(vRet!=0)
{
fprintf(stderr,"avio_open(%s) error", fc->filename);
}
}
// dump format in console
av_dump_format(fc, 0, pFilePath, 1);
vRet = avformat_write_header( fc, NULL );
if(vRet==0)
return 1;
else
return 0;
}
av_interleaved_write_frame:
void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void* p, int len, int64_t dts, int64_t pts )
{
AVStream *pst = NULL;
AVPacket pkt;
if ( 0 > vVideoStreamIdx )
return;
// may be audio or video
pst = fc->streams[ vStreamIdx ];
// Init packet
av_init_packet( &pkt );
if(vStreamIdx ==vVideoStreamIdx)
{
pkt.flags |= ( 0 >= getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
//pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pst->index;
pkt.data = (uint8_t*)p;
pkt.size = len;
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
// TODO: mark or unmark the log
//fprintf(stderr, "dts=%lld, pts=%lld\n",dts,pts);
// av_write_frame( fc, &pkt );
}
av_interleaved_write_frame( fc, &pkt );
}
av_write_trailer:
void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
return;
av_write_trailer( fc );
if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
avio_close( fc->pb );
av_free( fc );
}
Thanks.
It looks like you're using the same AVFormatContext for both the input and output?
In the line
pst = fc->streams[ vStreamIdx ];
You're assigning the AVStream* from the AVFormatContext connected to your input (the RTSP stream). But then later on you're trying to write the packet back to the same context with av_interleaved_write_frame( fc, &pkt );. I kind of think of a context as a file, which has helped me navigate this type of thing better. I do something identical to what you're doing (not iOS though) where I use a separate AVFormatContext for each of the input (RTSP stream) and output (mp4 file). If I'm correct, I think what you need to do is initialize a separate output AVFormatContext and write to that instead.
The following code (without all the error checking) is what I do to take an AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context that I had associated with the RTSP stream and write from one to the other. This is after I have fetched a packet, etc., which in your case it looks like you're already populating (I just take the packet from av_read_frame and re-package it).
This is code that could go in your write-frame function (but it also includes the writing of the header).
AVFormatContext * output_format_context;
AVStream * in_stream_2;
AVStream * out_stream_2;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
output_format = output_format_context->oformat;
// Loop through all streams
for (i = 0; i < input_format_context->nb_streams; i++) {
// Create a pointer to the input stream that was allocated earlier in the code
AVStream *in_stream = input_format_context->streams[i];
// Create a pointer to a new stream that will be part of the output
AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
// Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
out_stream->time_base = in_stream->time_base;
// This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
// I don't remember what this is for :)
out_stream->codec->codec_tag = 0;
// This just sets a flag from the format context to the stream relating to the header
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check NOFILE flag and open the output file context (previously the output file was associated with the format context, now it is actually opened).
if (!(output_format->flags & AVFMT_NOFILE))
avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context,NULL);
// Re-getting the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescaling pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts so this just protects that first one for my purposes. You probably don't need.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write header, close/cleanup... etc
// ....
This code is fairly bare bones and doesn't include the setup (which it sounds like you're doing correctly anyway). I would also imagine this code could be cleaned up and tweaked for your purposes, but it works for me to re-write the RTSP stream into a file (in my case many files, but code not shown).
The code is C code, so you might need to make minor tweaks for Swift compatibility (for some of the library function calls, maybe). I think overall it should be compatible though.
Hopefully this helps point you in the right direction. This was cobbled together thanks to several sample code sources (I don't remember where), along with warning prompts from the libraries themselves.
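For completeness, the matching shutdown on the output_format_context from the snippet above (to be adapted to wherever your recording actually stops) is roughly:
// Finalize the mp4: write the trailer, close the file, free the context.
av_write_trailer(output_format_context);
if (!(output_format_context->oformat->flags & AVFMT_NOFILE))
    avio_closep(&output_format_context->pb);
avformat_free_context(output_format_context);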

Library to get input from a PS4 or Xbox controller?

I am looking to control an ROV using either a PS4 controller or an Xbox 360 controller. I plan to do this by connecting the controller to a computer and then sending the input on to the ROV. A little background on the project: we have the controller up top, which sends its input to the computer, and then we send it from the computer to an Arduino on the ROV via Ethernet.
I have learned that this is possible on the PS3 controller by using Processing because another team did it, but I am unfamiliar with that language and don't have a PS3 controller. However, I have a PS4 and Xbox 360 controller.
From what I have researched, SDL could be an option for this, but I am looking for advice from more experienced people than me.
My question got deleted by pissed-off mods because it was "unrelated", but I asked the same thing. Anyway, I implemented my PS4 controller support with SFML like this. Another simple possibility would be SDL. Xbox should be the same, I guess. Have fun.
Here is some sample code:
sf::Joystick::update();
// Is joystick #0 connected?
m_bGamepadConnected = sf::Joystick::isConnected(0);
if(!m_bGamepadConnected) {
return;
}
// Does joystick #0 define a X axis?
m_bStickLX = sf::Joystick::hasAxis(0, sf::Joystick::X);
m_bStickLY = sf::Joystick::hasAxis(0, sf::Joystick::Y);
m_bStickRX = sf::Joystick::hasAxis(0, sf::Joystick::Z);
m_bStickRY = sf::Joystick::hasAxis(0, sf::Joystick::R);
m_bStickL2 = sf::Joystick::hasAxis(0, sf::Joystick::U);
m_bStickR2 = sf::Joystick::hasAxis(0, sf::Joystick::V);
m_tJoystick.start(5);
QObject::connect(&m_tJoystick, SIGNAL(timeout() ), this, SLOT(updateGamepad() ) );
And this may help you too:
sf::Joystick::update();
float fStickLX = 0;
float fStickLY = 0;
float fStickRX = 0;
float fStickRY = 0;
float fStickL2 = 0;
float fStickR2 = 0;
// What's the current position of the analog sticks
if(m_bStickLX) {
fStickLX = sf::Joystick::getAxisPosition(0, sf::Joystick::X) / 100;
}
if(m_bStickLY) {
fStickLY = sf::Joystick::getAxisPosition(0, sf::Joystick::Y) / 100;
}
if(m_bStickRX) {
fStickRX = sf::Joystick::getAxisPosition(0, sf::Joystick::Z) / 100;
}
if(m_bStickRY) {
fStickRY = sf::Joystick::getAxisPosition(0, sf::Joystick::R) / 100;
}
// R2 and L2
if(m_bStickL2) {
fStickL2 = sf::Joystick::getAxisPosition(0, sf::Joystick::U) + 100;
fStickL2 /= 200;
}
if(m_bStickR2) {
fStickR2 = sf::Joystick::getAxisPosition(0, sf::Joystick::V) + 100;
fStickR2 /= 200;
}
if(m_fStickLX != 0 || m_fStickLY != 0 || m_fStickRX != 0 || m_fStickRY != 0 || m_fStickL2 != fStickL2 || m_fStickR2 != fStickR2) {
m_bGamepadInUse = true;
} else {
m_bGamepadInUse = false;
}
m_fStickLX = fStickLX;
m_fStickLY = fStickLY;
m_fStickRX = fStickRX;
m_fStickRY = fStickRY;
m_fStickL2 = fStickL2;
m_fStickR2 = fStickR2;
m_bButLPressed = isButtonPressed(0, 0);
m_bButDPressed = isButtonPressed(0, 1);
m_bButRPressed = isButtonPressed(0, 2);
m_bButTPressed = isButtonPressed(0, 3);
//qDebug() << "m_bButDPressed pressed: " << m_bButDPressed;
static bool bArmToggle = false;
if(m_bButDPressed && !bArmToggle) {
m_bDeviceArmed = m_bDeviceArmed ? false : true;
bArmToggle = true;
qDebug() << "Device armed?: " << m_bDeviceArmed;
}
if(bArmToggle && !m_bButDPressed) {
bArmToggle = false;
}
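If you end up going the SDL route mentioned above instead, a minimal SDL2 game-controller polling loop looks roughly like this (a standalone sketch, not tied to the ROV code; PS4 and Xbox 360 pads both map to the same SDL_GameController axis and button names):
#include <SDL.h>
#include <cstdio>

int main() {
    // Initialize only the game controller subsystem.
    if (SDL_Init(SDL_INIT_GAMECONTROLLER) != 0) {
        std::printf("SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }
    // Open the first recognized controller.
    SDL_GameController *pad = SDL_GameControllerOpen(0);
    if (!pad) {
        std::printf("No controller found: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }
    bool running = true;
    while (running) {
        SDL_Event e;
        while (SDL_PollEvent(&e)) {
            if (e.type == SDL_QUIT || e.type == SDL_CONTROLLERDEVICEREMOVED)
                running = false;
        }
        // Axes are reported in the range [-32768, 32767].
        Sint16 lx = SDL_GameControllerGetAxis(pad, SDL_CONTROLLER_AXIS_LEFTX);
        Sint16 ly = SDL_GameControllerGetAxis(pad, SDL_CONTROLLER_AXIS_LEFTY);
        bool a    = SDL_GameControllerGetButton(pad, SDL_CONTROLLER_BUTTON_A) != 0;
        std::printf("LX=%d LY=%d A=%d\n", lx, ly, (int)a);
        SDL_Delay(16); // poll at roughly 60 Hz
    }
    SDL_GameControllerClose(pad);
    SDL_Quit();
    return 0;
}
From there you would translate the axis/button values into whatever serial or Ethernet messages the Arduino on the ROV expects.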

ActionScript 3 coding error

I need a little bit of help with a code issue. I am trying to connect to the server on the click of a button. When I execute the code I receive this error in my output: 1120: Access of undefined property nc.close
Which is strange, because I already declared the nc property in the btnStart function. I have been tweaking the code but I keep getting the same annoying error. If you have any suggestions on how to resolve this, that would be great!
Thanks
Code is below:
btn_One.addEventListener(MouseEvent.CLICK, btnStart);
btn_Two.addEventListener(MouseEvent.CLICK, btnClose);
function btnStart(event:MouseEvent):void{
trace("Connecting...");
var nc:NetConnection = new NetConnection();
nc.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
nc.connect("rtmfp://localhost/streamingLive");
}
//// ERROR LINE NC.CLOSE();
function btnClose(event:MouseEvent):void{
trace("Closing time");
nc.close();
}
function netStatusHandler(event:NetStatusEvent):void{
switch(event.info.code){
case "NetConnection.Connect.Success":
trace("Awesome connection");
break;
case "NetConnection.Connect.Failed":
trace("Unable to connect");
break;
case "NetConnection.Connect.Rejected":
trace("WHoooops");
break;
case "NetGroup.Connect.Success":
trace("GroupConnection");
break;
case "NetGroup.Connect.Failed":
trace("Group failed");
break;
case "NetGroup.Connect.Rejected":
trace("Ouch!!!");
break;
var ns:NetStream = new NetStream();
ns.publish("live", "streaming");
ns.attachCamera();
ns.attachAudio();
ns.connect(nc);
var ng:NetGroup = new NetGroup(nc, groupspec.groupspecWithAuthorizations());
ng.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
}
}
var cam:Camera = Camera.getCamera();
cam.setMode(420, 320, 15);
cam.setQuality(0, 85);
cam.addEventListener(StatusEvent.STATUS, statusHandler);
var vid:Video = new Video();
vid.width = cam.width;
vid.height = cam.height;
vid.x = 100;
vid.y = 100;
vid.attachCamera(cam);
addChild(vid);
var mic:Microphone = Microphone.getMicrophone();
mic.gain = 50;
mic.framesPerPacket = 1;
mic.setSilenceLevel(0, 2000);
mic.codec = SoundCodec.SPEEX;
//camera access permissions
function statusHandler(event:StatusEvent):void
{
switch (event.code)
{
case "Camera.Muted":
trace("User clicked Deny.");
break;
case "Camera.Unmuted":
trace("User clicked Accept.");
break;
}
}
//audio access permissions
function micStatus(event:StatusEvent):void
{
if (event.code == "Microphone.Unmuted")
{
trace("Microphone access was allowed.");
}
else if (event.code == "Microphone.Muted")
{
trace("Microphone access was denied.");
}
}
//array of camera names
var cameraA:Array = Camera.names;
for ( var i : int = 0; i < cameraA.length; i++){
trace ( "Camera: ", cameraA[i] );
}
var groupspec:GroupSpecifier = new GroupSpecifier("groupone");
groupspec.multicastEnabled = true;
//group postings
groupspec.postingEnabled = true;
//specific peer posting
groupspec.routingEnabled = true;
//automatic peer discovery
groupspec.serverChannelEnabled = true;
nc is declared inside one function, but you're trying to use it inside another function. Sometimes you can just pass the variable around as an argument, but in this case that won't really work out too well. So in this case, do the following:
var nc:NetConnection = new NetConnection();
function btnStart(event:MouseEvent):void{
trace("Connecting...");
// var nc:NetConnection = new NetConnection(); // removed
nc.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
nc.connect("rtmfp://localhost/streamingLive");
}
//// ERROR LINE NC.CLOSE();
function btnClose(event:MouseEvent):void{
trace("Closing time");
nc.close();
}