I am always stuck at the swapchain creation, and I don't know why. I enabled the validation layers, and the best answer I got is:
vkCreateSwapchainKHR: internal drawable creation failed
I have an NVIDIA GTX 960 card. I ran some Vulkan samples on it, so it must support Vulkan.
Here is my swapchain creator function:
void Renderer::createSwapChain(VkSwapchainKHR *swapchain, VkPhysicalDevice *dev, VkDevice *vulk_dev, VkSurfaceKHR *surface, uint32_t family_index, VkExtent2D *extent) {
    uint32_t format_count;
    VkFormat format;
    VkBool32 support;
    vkGetPhysicalDeviceSurfaceSupportKHR(*dev, family_index, *surface, &support);
    if (!support) {
        fprintf(*Renderer::error_log, "%d: Surface is not supported.", __LINE__);
        fclose(*Renderer::error_log);
        exit(-1);
    }

    vkGetPhysicalDeviceSurfaceFormatsKHR(*dev, *surface, &format_count, nullptr);
    vector<VkSurfaceFormatKHR> surface_format(format_count);
    vkGetPhysicalDeviceSurfaceFormatsKHR(*dev, *surface, &format_count, surface_format.data());
    if (1 == format_count && surface_format[0].format == VK_FORMAT_UNDEFINED) {
        format = VK_FORMAT_B8G8R8A8_UNORM;
        infos.format.color_format = VK_FORMAT_B8G8R8A8_UNORM;
    } else {
        format = surface_format[0].format;
    }

    VkFormat depth_format = VK_FORMAT_D16_UNORM;
    VkFormatProperties format_props;
    vkGetPhysicalDeviceFormatProperties(*dev, depth_format, &format_props);
    if (format_props.linearTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        infos.tiling = VK_IMAGE_TILING_LINEAR;
    } else if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        infos.tiling = VK_IMAGE_TILING_OPTIMAL;
    } else {
        fprintf(*Renderer::error_log, "%d: VK_FORMAT_D16_UNORM is not supported", __LINE__);
        fclose(*Renderer::error_log);
        exit(-1);
    }

    VkPresentModeKHR present_mode_selected = VK_PRESENT_MODE_FIFO_KHR;
    uint32_t present_modes_c;
    vkGetPhysicalDeviceSurfacePresentModesKHR(*dev, *surface, &present_modes_c, nullptr);
    vector<VkPresentModeKHR> present_modes(present_modes_c);
    vkGetPhysicalDeviceSurfacePresentModesKHR(*dev, *surface, &present_modes_c, present_modes.data());
    for (uint32_t i = 0; i < present_modes_c; i++) {
        if (present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR) {
            cout << "Mailbox supported." << endl;
            present_mode_selected = VK_PRESENT_MODE_MAILBOX_KHR;
        }
    }

    VkSurfaceCapabilitiesKHR capabilities;
    vkGetPhysicalDeviceSurfaceCapabilitiesKHR(*dev, *surface, &capabilities);
    if (capabilities.maxImageExtent.width < (*extent).width) {
        (*extent).width = capabilities.maxImageExtent.width;
    }
    if (capabilities.maxImageExtent.height < (*extent).height) {
        (*extent).height = capabilities.maxImageExtent.height;
    }

    VkCompositeAlphaFlagBitsKHR composite_alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
    if (capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) {
        composite_alpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
    } else if (capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR) {
        composite_alpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR;
    }

    VkSurfaceTransformFlagBitsKHR transform;
    if (capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) {
        transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    } else {
        transform = capabilities.currentTransform;
    }

    VkSwapchainCreateInfoKHR swapchain_ci = {};
    swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    swapchain_ci.pNext = NULL;
    swapchain_ci.surface = *surface;
    swapchain_ci.minImageCount = capabilities.minImageCount;
    swapchain_ci.imageFormat = format;
    swapchain_ci.imageExtent = capabilities.currentExtent;
    swapchain_ci.preTransform = transform;
    swapchain_ci.compositeAlpha = composite_alpha;
    swapchain_ci.imageArrayLayers = 1;
    swapchain_ci.presentMode = present_mode_selected;
    swapchain_ci.oldSwapchain = VK_NULL_HANDLE;
    swapchain_ci.clipped = VK_TRUE;
    swapchain_ci.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    swapchain_ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    swapchain_ci.queueFamilyIndexCount = 0;

    if (VK_SUCCESS != vkCreateSwapchainKHR(*vulk_dev, &swapchain_ci, nullptr, swapchain)) {
        fprintf(*Renderer::error_log, "%d: Couldn't create Swapchain", __LINE__);
        fclose(*Renderer::error_log);
        exit(-1);
    } else {
        cout << "Swapchain created successfully" << endl;
    }
}
I recently had this exact same problem; it was caused by the OpenGL context in a GLFW window.
So if you happen to be using GLFW, include the line
glfwWindowHint( GLFW_CLIENT_API, GLFW_NO_API );
before you create a window.
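For reference, a minimal sketch of that setup (the window size and title are arbitrary, and the Vulkan instance/surface creation is only hinted at in comments):

#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>

int main()
{
    glfwInit();
    // Without this hint GLFW creates an OpenGL context for the window, and the
    // driver's swapchain creation can then fail with "internal drawable creation failed".
    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
    GLFWwindow *window = glfwCreateWindow(800, 600, "Vulkan window", nullptr, nullptr);

    // ... create the VkInstance here, then the surface the swapchain will use:
    // VkSurfaceKHR surface;
    // glfwCreateWindowSurface(instance, window, nullptr, &surface);

    while (!glfwWindowShouldClose(window))
        glfwPollEvents();

    glfwDestroyWindow(window);
    glfwTerminate();
    return 0;
}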
My MVCE for an SSL relay server:
#pragma once
#include <stdint.h>
#include <iostream>
#include <asio.hpp>
#include <asio/ssl.hpp>
namespace test
{
namespace setup
{
const uint32_t maxMessageSize = 1024 * 1024;
const uint32_t maxSessionsNum = 10;
}
enum class MessageType
{
LOG_ON = 0,
TEXT_MESSAGE = 1
};
class MessageHeader
{
public:
uint32_t messageType;
uint32_t messageLength;
MessageHeader(uint32_t messageType, uint32_t messageLength) : messageType(messageType), messageLength(messageLength) {}
};
class LogOn
{
public:
MessageHeader header;
uint32_t sessionId;
uint32_t isClient0;
LogOn() : header((uint32_t)MessageType::LOG_ON, sizeof(LogOn)) {}
};
class TextMessage
{
public:
MessageHeader header;
uint8_t data[];
TextMessage() : header((uint32_t)MessageType::TEXT_MESSAGE, sizeof(TextMessage)){}
};
class ClientSocket;
class Session
{
public:
ClientSocket* pClient0 = nullptr; // initialized so the relay's null checks are well-defined
ClientSocket* pClient1 = nullptr;
};
Session* getSession(uint32_t sessionId);
class ClientSocket
{
public:
bool useTLS;
std::shared_ptr<asio::ip::tcp::socket> socket;
std::shared_ptr<asio::ssl::stream<asio::ip::tcp::socket>> socketSSL;
Session* pSession = nullptr; // set on LOG_ON; checked before relaying
bool isClient0 = false;
std::recursive_mutex writeBufferLock;
std::vector<char> readBuffer;
uint32_t readPos;
ClientSocket(asio::ip::tcp::socket& socket) : useTLS(false)
{
this->socket = std::make_shared<asio::ip::tcp::socket>(std::move(socket));
this->readBuffer.resize(setup::maxMessageSize + sizeof(MessageHeader));
this->readPos = 0;
}
ClientSocket(asio::ssl::stream<asio::ip::tcp::socket>& socket) : useTLS(true)
{
this->socketSSL = std::make_shared<asio::ssl::stream<asio::ip::tcp::socket>>(std::move(socket));
this->readBuffer.resize(setup::maxMessageSize + sizeof(MessageHeader));
this->readPos = 0;
}
bool writeSocket(uint8_t* pBuffer, uint32_t bufferSize)
{
try
{
std::unique_lock<std::recursive_mutex>
lock(this->writeBufferLock);
size_t writtenBytes = 0;
if (true == this->useTLS)
{
writtenBytes = asio::write(*this->socketSSL,
asio::buffer(pBuffer, bufferSize));
}
else
{
writtenBytes = asio::write(*this->socket,
asio::buffer(pBuffer, bufferSize));
}
return (writtenBytes == bufferSize);
}
catch (const asio::system_error& e)
{
std::cout << e.what() << std::endl;
}
catch (const std::exception& e)
{
std::cout << e.what() << std::endl;
}
catch (...)
{
std::cout << "Some other exception" << std::endl;
}
return false;
}
void asyncReadNextMessage(uint32_t messageSize)
{
auto readMessageLambda = [&](const asio::error_code errorCode, std::size_t length)
{
this->readPos += (uint32_t)length;
if (0 != errorCode.value())
{
//send socket to remove
printf("errorCode= %u, message=%s\n", errorCode.value(), errorCode.message().c_str());
//sendRemoveMeSignal();
return;
}
if ((this->readPos < sizeof(MessageHeader)))
{
asyncReadNextMessage(sizeof(MessageHeader) - this->readPos);
return;
}
MessageHeader* pMessageHeader = (MessageHeader*)this->readBuffer.data();
if (pMessageHeader->messageLength > setup::maxMessageSize)
{
//Message to big - should disconnect ?
this->readPos = 0;
asyncReadNextMessage(sizeof(MessageHeader));
return;
}
if (this->readPos < pMessageHeader->messageLength)
{
asyncReadNextMessage(pMessageHeader->messageLength - this->readPos);
return;
}
MessageType messageType = (MessageType)pMessageHeader->messageType;
switch(messageType)
{
case MessageType::LOG_ON:
{
LogOn* pLogOn = (LogOn*)pMessageHeader;
printf("LOG_ON message sessionId=%u, isClient0=%u\n", pLogOn->sessionId, pLogOn->isClient0);
this->isClient0 = pLogOn->isClient0;
this->pSession = getSession(pLogOn->sessionId);
if (this->isClient0)
this->pSession->pClient0 = this;
else
this->pSession->pClient1 = this;
}
break;
case MessageType::TEXT_MESSAGE:
{
TextMessage* pTextMessage = (TextMessage*)pMessageHeader;
if (nullptr != pSession)
{
if (this->isClient0)
{
if (nullptr != pSession->pClient1)
{
pSession->pClient1->writeSocket((uint8_t*)pTextMessage, pTextMessage->header.messageLength);
}
}
else
{
if (nullptr != pSession->pClient0)
{
pSession->pClient0->writeSocket((uint8_t*)pTextMessage, pTextMessage->header.messageLength);
}
}
}
}
break;
}
this->readPos = 0;
asyncReadNextMessage(sizeof(MessageHeader));
};
if (true == this->useTLS)
{
this->socketSSL->async_read_some(asio::buffer(this->readBuffer.data() + this->readPos, messageSize), readMessageLambda);
}
else
{
this->socket->async_read_some(asio::buffer(this->readBuffer.data() + this->readPos, messageSize), readMessageLambda);
}
}
};
class SSLRelayServer
{
public:
static SSLRelayServer* pSingleton;
asio::io_context ioContext;
asio::ssl::context sslContext;
std::vector<std::thread> workerThreads;
asio::ip::tcp::acceptor* pAcceptor;
asio::ip::tcp::endpoint* pEndpoint;
bool useTLS;
Session* sessions[setup::maxSessionsNum] = {}; // zero-initialized so getSession's null check works
SSLRelayServer() : pAcceptor(nullptr), pEndpoint(nullptr), sslContext(asio::ssl::context::tlsv13_server)//sslContext(asio::ssl::context::sslv23)
{
this->useTLS = false;
this->pSingleton = this;
//this->sslContext.set_options(asio::ssl::context::default_workarounds | asio::ssl::context::no_sslv2);
this->sslContext.set_password_callback(std::bind(&SSLRelayServer::getPrivateKeyPEMFilePassword, this));
this->sslContext.use_certificate_chain_file("server_cert.pem");
this->sslContext.use_private_key_file("server_private_key.pem",
asio::ssl::context::pem);
}
static SSLRelayServer* getSingleton()
{
return pSingleton;
}
std::string getPrivateKeyPEMFilePassword() const
{
return "";
}
void addClientSocket(asio::ip::tcp::socket& socket)
{
ClientSocket* pClientSocket = new ClientSocket(socket); // use smart pointers
pClientSocket->asyncReadNextMessage(sizeof(MessageHeader));
}
void addSSLClientToken(asio::ssl::stream<asio::ip::tcp::socket>& sslSocket)
{
ClientSocket* pClientSocket = new ClientSocket(sslSocket); // use smart pointers
pClientSocket->asyncReadNextMessage(sizeof(MessageHeader));
}
void handleAccept(asio::ip::tcp::socket& socket, const asio::error_code& errorCode)
{
if (!errorCode)
{
printf("accepted\n");
if (true == socket.is_open())
{
asio::ip::tcp::no_delay no_delay_option(true);
socket.set_option(no_delay_option);
addClientSocket(socket);
}
}
}
void handleAcceptTLS(asio::ip::tcp::socket& socket, const asio::error_code& errorCode)
{
if (!errorCode)
{
printf("accepted\n");
if (true == socket.is_open())
{
asio::ip::tcp::no_delay no_delay_option(true);
asio::ssl::stream<asio::ip::tcp::socket> sslStream(std::move(socket), this->sslContext);
try
{
sslStream.handshake(asio::ssl::stream_base::server);
sslStream.lowest_layer().set_option(no_delay_option);
addSSLClientToken(sslStream);
}
catch (const asio::system_error& e)
{
std::cout << e.what() << std::endl;
return;
}
catch (const std::exception& e)
{
std::cout << e.what() << std::endl;
return;
}
catch (...)
{
std::cout << "Other exception" << std::endl;
return;
}
}
}
}
void startAccept()
{
auto acceptHandler = [this](const asio::error_code& errorCode, asio::ip::tcp::socket socket)
{
printf("acceptHandler\n");
handleAccept(socket, errorCode);
this->startAccept();
};
auto tlsAcceptHandler = [this](const asio::error_code& errorCode, asio::ip::tcp::socket socket)
{
printf("tlsAcceptHandler\n");
handleAcceptTLS(socket, errorCode);
this->startAccept();
};
if (true == this->useTLS)
{
this->pAcceptor->async_accept(tlsAcceptHandler);
}
else
{
this->pAcceptor->async_accept(acceptHandler);
}
}
bool run(uint32_t servicePort, uint32_t threadsNum, bool useTLS)
{
this->useTLS = useTLS;
this->pEndpoint = new asio::ip::tcp::endpoint(asio::ip::tcp::v4(), servicePort);
this->pAcceptor = new asio::ip::tcp::acceptor(ioContext, *pEndpoint);
this->pAcceptor->listen();
this->startAccept();
for (uint32_t threadIt = 0; threadIt < threadsNum; ++threadIt)
{
this->workerThreads.emplace_back([&]() {
#ifdef WINDOWS
SetThreadDescription(GetCurrentThread(), L"SSLRelayServer worker thread");
#endif
this->ioContext.run(); }
);
}
return true;
}
Session* getSession(uint32_t sessionId)
{
if (nullptr == this->sessions[sessionId])
{
this->sessions[sessionId] = new Session();
}
return this->sessions[sessionId];
}
};
SSLRelayServer* SSLRelayServer::pSingleton = nullptr;
Session* getSession(uint32_t sessionId)
{
SSLRelayServer* pServer = SSLRelayServer::getSingleton();
Session* pSession = pServer->getSession(sessionId);
return pSession;
}
class Client
{
public:
asio::ssl::context sslContext;
std::shared_ptr<asio::ip::tcp::socket> socket;
std::shared_ptr<asio::ssl::stream<asio::ip::tcp::socket>> socketSSL;
asio::io_context ioContext;
bool useTLS;
bool isClient0;
uint32_t readDataIt;
std::vector<uint8_t> readBuffer;
std::thread listenerThread;
Client() : sslContext(asio::ssl::context::tlsv13_client)//sslContext(asio::ssl::context::sslv23)
{
sslContext.load_verify_file("server_cert.pem");
//sslContext.set_verify_mode(asio::ssl::verify_peer);
using asio::ip::tcp;
using std::placeholders::_1;
using std::placeholders::_2;
sslContext.set_verify_callback(std::bind(&Client::verifyCertificate, this, _1, _2));
this->readBuffer.resize(setup::maxMessageSize);
this->readDataIt = 0;
}
bool verifyCertificate(bool preverified, asio::ssl::verify_context& verifyCtx)
{
return true;
}
void listenerRunner()
{
#ifdef WINDOWS
if (this->isClient0)
{
SetThreadDescription(GetCurrentThread(), L"listenerRunner client0");
}
else
{
SetThreadDescription(GetCurrentThread(), L"listenerRunner client1");
}
#endif
while (1==1)
{
asio::error_code errorCode;
size_t transferred = 0;
if (true == this->useTLS)
{
transferred = this->socketSSL->read_some(asio::buffer(this->readBuffer.data() + this->readDataIt, sizeof(MessageHeader) - this->readDataIt), errorCode);
}
else
{
transferred = this->socket->read_some(asio::buffer(this->readBuffer.data() + this->readDataIt, sizeof(MessageHeader) - this->readDataIt), errorCode);
}
this->readDataIt += transferred;
if (0 != errorCode.value())
{
this->readDataIt = 0;
continue;
}
if (this->readDataIt < sizeof(MessageHeader))
continue;
MessageHeader* pMessageHeader = (MessageHeader*)this->readBuffer.data();
if (pMessageHeader->messageLength > setup::maxMessageSize)
{
exit(1);
}
bool resetSocket = false;
while (pMessageHeader->messageLength > this->readDataIt)
{
printf("readDataIt=%u, threadId=%u\n", this->readDataIt, GetCurrentThreadId());
{
//message not complete
if (true == this->useTLS)
{
transferred = this->socketSSL->read_some(asio::buffer(this->readBuffer.data() + this->readDataIt, pMessageHeader->messageLength - this->readDataIt), errorCode);
}
else
{
transferred = this->socket->read_some(asio::buffer(this->readBuffer.data() + this->readDataIt, pMessageHeader->messageLength - this->readDataIt), errorCode);
}
this->readDataIt += transferred;
}
if (0 != errorCode.value())
{
exit(1);
}
}
MessageType messageType = (MessageType)pMessageHeader->messageType;
switch (messageType)
{
case MessageType::TEXT_MESSAGE:
{
TextMessage* pTextMessage = (TextMessage*)pMessageHeader;
printf("TEXT_MESSAGE: %s\n", pTextMessage->data);
}
break;
}
this->readDataIt = 0;
}
}
void run(uint32_t sessionId, bool isClient0, bool useTLS, uint32_t servicePort)
{
this->useTLS = useTLS;
this->isClient0 = isClient0;
if (useTLS)
{
socketSSL = std::make_shared<asio::ssl::stream<asio::ip::tcp::socket>>(ioContext, sslContext);
}
else
{
socket = std::make_shared<asio::ip::tcp::socket>(ioContext);
}
asio::ip::tcp::resolver resolver(ioContext);
asio::ip::tcp::resolver::results_type endpoints = resolver.resolve(asio::ip::tcp::v4(), "127.0.0.1", std::to_string(servicePort));
asio::ip::tcp::no_delay no_delay_option(true);
if (true == useTLS)
{
asio::ip::tcp::endpoint sslEndpoint = asio::connect(socketSSL->lowest_layer(), endpoints);
socketSSL->handshake(asio::ssl::stream_base::client);
socketSSL->lowest_layer().set_option(no_delay_option);
}
else
{
asio::ip::tcp::endpoint endpoint = asio::connect(*socket, endpoints);
socket->set_option(no_delay_option);
}
this->listenerThread = std::thread(&Client::listenerRunner, this);
LogOn logOn;
logOn.isClient0 = isClient0;
logOn.sessionId = sessionId;
const uint32_t logOnSize = sizeof(logOn);
if (true == useTLS)
{
size_t transferred = asio::write(*socketSSL, asio::buffer(&logOn, sizeof(LogOn)));
}
else
{
size_t transferred = asio::write(*socket, asio::buffer(&logOn, sizeof(LogOn)));
}
uint32_t counter = 0;
while (1 == 1)
{
std::string number = std::to_string(counter);
std::string message;
if (this->isClient0)
{
message = "Client0: " + number;
}
else
{
message = "Client1: " + number;
}
TextMessage textMessage;
textMessage.header.messageLength += message.size() + 1;
if (this->useTLS)
{
size_t transferred = asio::write(*socketSSL, asio::buffer(&textMessage, sizeof(TextMessage)));
transferred = asio::write(*socketSSL, asio::buffer(message.c_str(), message.length() + 1));
}
else
{
size_t transferred = asio::write(*socket, asio::buffer(&textMessage, sizeof(TextMessage)));
transferred = asio::write(*socket, asio::buffer(message.c_str(), message.length() + 1));
}
++counter;
//Sleep(1000);
}
}
};
void clientTest(uint32_t sessionId, bool isClient0, bool useTLS,
uint32_t servicePort)
{
#ifdef WINDOWS
if (isClient0)
{
SetThreadDescription(GetCurrentThread(), L"Client0");
}
else
{
SetThreadDescription(GetCurrentThread(), L"Client1");
}
#endif
Client client;
client.run(sessionId, isClient0, useTLS, servicePort);
while (1 == 1)
{
Sleep(1000);
}
}
void SSLRelayTest()
{
SSLRelayServer relayServer;
const uint32_t threadsNum = 1;
const bool useTLS = true;
const uint32_t servicePort = 777;
relayServer.run(servicePort, threadsNum, useTLS);
Sleep(5000);
std::vector<std::thread> threads;
const uint32_t sessionId = 0;
threads.emplace_back(clientTest, sessionId, true, useTLS, servicePort);
threads.emplace_back(clientTest, sessionId, false, useTLS,servicePort);
for (std::thread& threadIt : threads)
{
threadIt.join();
}
}
}
What does this sample do?
It runs an SSL relay server on localhost port 777, which connects two clients and allows them to exchange text messages.
Problem:
When I run the sample, the server returns the error "errorCode= 167772441, message=decryption failed or bad record mac (SSL routines)" in asyncReadNextMessage(uint32_t messageSize).
I found out this is caused by the client reading from and writing to the client SSL socket from separate threads (changing the variable useTLS to 0 runs it on a plain socket, which proves that it is an SSL socket problem).
Apparently TLS is not a full-duplex protocol (I did not know about that). I can't synchronize read and write access with a mutex, because when the socket enters a blocking read and there is no incoming message, writing to the socket would be blocked forever. In the thread Boost ASIO, SSL: How do strands help the implementation?
someone recommended using strands, but someone else wrote that asio strands only serialize the execution of the read and write handlers, not the operations themselves, which does not fix the problem.
I expect that there is some way to synchronize reads and writes to an SSL socket. I'm 100% sure the problem lies in synchronizing reads and writes to the socket, because when I wrote an example where a single thread did both the reading and the writing, it worked. However, that client always expects there to be a message to read, which can block all writes if there is none. Can it be solved without using separate sockets for reads and writes?
Okay, I figured it out by writing many different code samples involving SSL sockets.
When asio::io_context is already running, you can't simply schedule an asio::async_write or asio::async_read from a thread that is not
associated with the strand connected to that socket.
So when there is:
asio::async_write(*this->socketSSL, asio::buffer(pBuffer, bufferSize), asio::bind_executor(readWriteStrand,writeMessageLambda));
but the executing thread is not running from the readWriteStrand strand, then it should be written as:
asio::post(ioContext, asio::bind_executor(readWriteStrand, [&]() {asio::async_read(*this->socketSSL, asio::buffer(readBuffer.data() + this->readDataIt, messageSize), asio::bind_executor(readWriteStrand, readMessageLambda)); }));
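To make the pattern concrete, here is a minimal sketch (assuming readWriteStrand is an asio::strand created from the same io_context that runs the socket, as the snippets above imply; the buffer handling is illustrative):

#include <asio.hpp>
#include <asio/ssl.hpp>
#include <cstdio>
#include <memory>
#include <vector>

using SslSocket = asio::ssl::stream<asio::ip::tcp::socket>;

// Initiate an async_write from an arbitrary thread: first hop onto the strand
// with asio::post, then start the operation and bind its completion handler to
// the same strand. Reads initiated the same way can never drive the SSL engine
// concurrently with this write.
void queueWrite(asio::io_context& ioContext,
                asio::strand<asio::io_context::executor_type>& readWriteStrand,
                SslSocket& socketSSL,
                std::shared_ptr<std::vector<uint8_t>> buffer) // shared_ptr keeps the data alive until completion
{
    asio::post(ioContext, asio::bind_executor(readWriteStrand,
        [&socketSSL, &readWriteStrand, buffer]()
        {
            asio::async_write(socketSSL, asio::buffer(*buffer),
                asio::bind_executor(readWriteStrand,
                    [buffer](const asio::error_code& errorCode, std::size_t /*transferred*/)
                    {
                        if (errorCode)
                            std::printf("write error: %s\n", errorCode.message().c_str());
                    }));
        }));
}

The same wrapping applies to async_read: as long as every initiation and every completion handler runs through readWriteStrand, reads and writes are serialized without a mutex and without ever blocking a thread.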
I'm getting VK_ERROR_FEATURE_NOT_PRESENT (-8).
But I'm using vkGetPhysicalDeviceFeatures to get the features.
My Code:
std::vector<VkDeviceQueueCreateInfo> LogicalDevice::CreateDeviceQueueCreateInfos(QueueFamilies queueFamilies)
{
    std::vector uniqueQueueFamilies = queueFamilies.GetUniqueQueueFamilies();
    std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
    for (auto queueFamily : uniqueQueueFamilies)
    {
        const int countOfQueues = queueFamily.CountOfQueues;
        std::vector<float> queuePriorities(countOfQueues);
        for (int indexOfPriority = 0; indexOfPriority < countOfQueues; indexOfPriority++)
        {
            queuePriorities[indexOfPriority] = 1.0f - ((float)indexOfPriority / countOfQueues);
        }
        VkDeviceQueueCreateInfo queueCreateInfo{};
        queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queueCreateInfo.queueFamilyIndex = queueFamily.Index.value();
        queueCreateInfo.queueCount = queueFamily.CountOfQueues;
        queueCreateInfo.flags = queueFamily.Flags;
        queueCreateInfo.pQueuePriorities = queuePriorities.data();
        queueCreateInfos.push_back(queueCreateInfo);
    }
    return queueCreateInfos;
}

VkDeviceCreateInfo LogicalDevice::GetDeviceCreateInfo(std::vector<VkDeviceQueueCreateInfo> deviceQueueCreateInfos, VkPhysicalDevice physicalDevice)
{
    VkPhysicalDeviceFeatures deviceFeatures{};
    vkGetPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);
    VkDeviceCreateInfo deviceCreateInfo{};
    deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    deviceCreateInfo.queueCreateInfoCount = static_cast<uint32_t>(deviceQueueCreateInfos.size());
    deviceCreateInfo.pQueueCreateInfos = deviceQueueCreateInfos.data();
    deviceCreateInfo.pEnabledFeatures = &deviceFeatures;
    return deviceCreateInfo;
}

void LogicalDevice::Initialize(VkPhysicalDevice physicalDevice, VkSurfaceKHR surfaceForPickingPhysicalDevice)
{
    m_queueFamilies = QueueFamilies::GetQueueFamilies(physicalDevice, surfaceForPickingPhysicalDevice);
    std::vector<VkDeviceQueueCreateInfo> deviceQueueCreateInfos = CreateDeviceQueueCreateInfos(m_queueFamilies);
    VkDeviceCreateInfo deviceCreateInfo = GetDeviceCreateInfo(deviceQueueCreateInfos, physicalDevice);
    VkResult result = vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &m_vulkanDevice);
    if (result != VK_SUCCESS)
    {
        throw new std::runtime_error("Cannot create logical device.");
    }
}
The deviceFeatures variable that you read the features into, and which the create-info structure points at, is local to GetDeviceCreateInfo. It is out of scope at the point where you call vkCreateDevice, so reading through pEnabledFeatures is undefined behavior. You're probably getting random junk at device creation time instead, which causes that error.
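A minimal fix, sketched under the assumption that the rest of LogicalDevice stays as in the question, is to keep the features struct alive in Initialize so it outlives the vkCreateDevice call (note that the by-value deviceQueueCreateInfos parameter of GetDeviceCreateInfo has the same dangling-pointer problem, which inlining the create-info setup also avoids):

void LogicalDevice::Initialize(VkPhysicalDevice physicalDevice, VkSurfaceKHR surfaceForPickingPhysicalDevice)
{
    m_queueFamilies = QueueFamilies::GetQueueFamilies(physicalDevice, surfaceForPickingPhysicalDevice);
    std::vector<VkDeviceQueueCreateInfo> deviceQueueCreateInfos = CreateDeviceQueueCreateInfos(m_queueFamilies);

    // Both of these now live in Initialize's stack frame, so the pointers
    // stored inside deviceCreateInfo stay valid through vkCreateDevice.
    VkPhysicalDeviceFeatures deviceFeatures{};
    vkGetPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);

    VkDeviceCreateInfo deviceCreateInfo{};
    deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    deviceCreateInfo.queueCreateInfoCount = static_cast<uint32_t>(deviceQueueCreateInfos.size());
    deviceCreateInfo.pQueueCreateInfos = deviceQueueCreateInfos.data();
    deviceCreateInfo.pEnabledFeatures = &deviceFeatures;

    VkResult result = vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &m_vulkanDevice);
    if (result != VK_SUCCESS)
    {
        // Throw by value rather than `throw new ...`, so `catch (const std::exception&)` can catch it.
        throw std::runtime_error("Cannot create logical device.");
    }
}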
My code involves both Processing and Arduino. Five different photocells trigger five different sounds. My sound files play only when the LDR value is above the threshold.
The Null Pointer Exception is highlighted on this line
for (int i = 0; i < ldrValues.length; i++) {
I am not sure which part of my code should be changed so that I can run it.
import processing.serial.*;
import processing.sound.*;

SoundFile[] soundFiles = new SoundFile[5];
Serial myPort; // Create object from Serial class
int[] ldrValues;
int[] thresholds = {440, 490, 330, 260, 450};
int i = 0;
boolean[] states = {false, false, false, false, false};

void setup() {
  size(200, 200);
  println((Object[])Serial.list());
  String portName = Serial.list()[3];
  myPort = new Serial(this, portName, 9600);
  soundFiles[0] = new SoundFile(this, "1.mp3");
  soundFiles[1] = new SoundFile(this, "2.mp3");
  soundFiles[2] = new SoundFile(this, "3.mp3");
  soundFiles[3] = new SoundFile(this, "4.mp3");
  soundFiles[4] = new SoundFile(this, "5.mp3");
}

void draw()
{
  background(255);
  // serial loop
  while (myPort.available() > 0) {
    String myString = myPort.readStringUntil(10);
    if (myString != null) {
      //println(myString);
      ldrValues = int(split(myString.trim(), ','));
      //println(ldrValues);
    }
  }
  for (int i = 0; i < ldrValues.length; i++) {
    println(states[i]);
    println(ldrValues[i]);
    if (ldrValues[i] > thresholds[i] && !states[i]) {
      println("sensor " + i + " is activated");
      soundFiles[i].play();
      states[i] = true;
    }
    if (ldrValues[i] < thresholds[i]) {
      println("sensor " + i + " is NOT activated");
      soundFiles[i].stop();
      states[i] = false;
    }
  }
}
Your approach is, shall we say, optimistic? :)
It always assumes there was a message from Serial, formatted the right way so it could be parsed, and that there were absolutely zero issues buffering data (incomplete strings, etc.).
The simplest thing you could do is check whether the parsing succeeded; otherwise the ldrValues array will still be null:
void draw()
{
  background(255);
  // serial loop
  String myString = null; // declared here so it is still in scope for the error message below
  while (myPort.available() > 0) {
    myString = myPort.readStringUntil(10);
    if (myString != null) {
      //println(myString);
      ldrValues = int(split(myString.trim(), ','));
      //println(ldrValues);
    }
  }
  // double check that parsing int values from the string was successful as well, not just buffering the string
  if (ldrValues != null) {
    for (int i = 0; i < ldrValues.length; i++) {
      println(states[i]);
      println(ldrValues[i]);
      if (ldrValues[i] > thresholds[i] && !states[i]) {
        println("sensor " + i + " is activated");
        soundFiles[i].play();
        states[i] = true;
      }
      if (ldrValues[i] < thresholds[i]) {
        println("sensor " + i + " is NOT activated");
        soundFiles[i].stop();
        states[i] = false;
      }
    }
  } else {
    // print a helpful debugging message otherwise
    println("error parsing ldrValues from string: " + myString);
  }
}
(Didn't know you could parse an int[] with int(): nice!)
I’m looking for a class like QSprite, which takes responsibility for reading a pixmap from a source file, splitting the source image into a sequence of frames, and returning a sub-image by index.
Something like this:
class QSprite2
{
bool load(QUrl url, QSize frameSize);
QImage getFrame(int index);
};
Is there any way to get the described functionality with existing classes, or do I have to implement this logic myself?
You can use QQuickImageProvider. Here's a class I wrote that does something similar:
SpriteImageProvider.h:
#ifndef SPRITEIMAGEPROVIDER_H
#define SPRITEIMAGEPROVIDER_H
#include <QHash>
#include <QImage>
#include <QString>
#include <QQuickImageProvider>
#include "IsleGlobal.h"
class ISLE_EXPORT SpriteImageProvider : public QQuickImageProvider
{
public:
SpriteImageProvider();
QImage requestImage(const QString &id, QSize *size, const QSize &requestedSize) override;
private:
int parseFrameIndex(const QString &frameIndexStr) const;
int parseFrameWidth(const QString &frameWidthStr) const;
int parseFrameHeight(const QString &frameHeightStr) const;
QHash<QString, QImage> mImages;
};
#endif // SPRITEIMAGEPROVIDER_H
SpriteImageProvider.cpp:
#include "SpriteImageProvider.h"
#include <QImage>
#include <QDebug>
SpriteImageProvider::SpriteImageProvider() :
QQuickImageProvider(QQmlImageProviderBase::Image)
{
}
static const QRgb whiteRgba = qRgba(255, 255, 255, 255);
static const QRgb magentaRgba = qRgba(255, 0, 255, 255);
static const QRgb transparentRgba = qRgba(0, 0, 0, 0);
QImage SpriteImageProvider::requestImage(const QString &id, QSize *size, const QSize &)
{
QStringList args = id.split(QLatin1String(","));
if (args.length() < 1) {
qWarning() << "Must pass at least the file name as arguments for SpriteImageProvider source";
return QImage();
} else if (args.length() != 1 && args.length() != 2 && args.length() != 4) {
qWarning() << "Must pass either (fileName), (fileName, frameIndex) or"
<< "(fileName, frameWidth, frameHeight, frameIndex) as arguments for SpriteImageProvider source";
return QImage();
}
const QString imageFilename = ":/" + args.first();
int frameIndex = -2;
int frameWidth = -2;
int frameHeight = -2;
if (args.length() > 1) {
frameIndex = parseFrameIndex(args.last());
if (frameIndex == -1)
return QImage();
}
if (args.length() == 4) {
frameWidth = parseFrameIndex(args.at(1));
if (frameWidth == -1)
return QImage();
frameHeight = parseFrameIndex(args.at(2));
if (frameHeight == -1)
return QImage();
}
QHash<QString, QImage>::const_iterator it = mImages.find(imageFilename);
if (it == mImages.end()) {
QImage image(imageFilename);
if (image.isNull()) {
qWarning() << "Failed to load image at" << imageFilename;
return image;
}
// TODO: is there a better way of doing this?
QImage alphaImage = image;
for (int y = 0; y < alphaImage.height(); ++y) {
for (int x = 0; x < alphaImage.width(); ++x) {
const QRgb pixelRgb = alphaImage.pixel(x, y);
if (pixelRgb == whiteRgba || pixelRgb == magentaRgba)
alphaImage.setPixel(x, y, transparentRgba);
}
}
mImages.insert(imageFilename, alphaImage);
it = mImages.find(imageFilename);
}
if (frameWidth == -2 || frameHeight == -2) {
if (frameIndex == -2) {
// Use the whole image.
frameWidth = it.value().width();
frameHeight = it.value().height();
frameIndex = 0;
} else {
frameWidth = 64;
frameHeight = 64;
}
}
// Copy an individual frame out of the larger image.
const int framesWide = it.value().width() / frameWidth;
const QRect subRect((frameIndex % framesWide) * frameWidth,
(frameIndex / framesWide) * frameHeight,
frameWidth,
frameHeight);
// qDebug() << "id" << id;
// qDebug() << "framesWide" << framesWide;
// qDebug() << "subRect" << subRect;
const QImage frameImage = it.value().copy(subRect);
*size = frameImage.size();
return frameImage;
}
int SpriteImageProvider::parseFrameIndex(const QString &frameIndexStr) const
{
bool convertedToIntSuccessfully = false;
const int frameIndex = frameIndexStr.toInt(&convertedToIntSuccessfully);
if (!convertedToIntSuccessfully) {
qWarning() << "Failed to convert frame index" << frameIndexStr << "to an int";
return -1;
}
return frameIndex;
}
int SpriteImageProvider::parseFrameWidth(const QString &frameWidthStr) const
{
bool convertedToIntSuccessfully = false;
const int frameWidth = frameWidthStr.toInt(&convertedToIntSuccessfully);
if (!convertedToIntSuccessfully) {
qWarning() << "Failed to convert frame width" << frameWidthStr << "to an int";
return -1;
}
return frameWidth;
}
int SpriteImageProvider::parseFrameHeight(const QString &frameHeightStr) const
{
bool convertedToIntSuccessfully = false;
const int frameHeight = frameHeightStr.toInt(&convertedToIntSuccessfully);
if (!convertedToIntSuccessfully) {
qWarning() << "Failed to convert frame height" << frameHeightStr << "to an int";
return -1;
}
return frameHeight;
}
It also supports "alpha keys" - white and magenta pixels in the source image will be turned into transparent pixels.
It's registered with the QML engine like this:
mEngine->addImageProvider("sprite", new SpriteImageProvider);
and used in QML like this:
import QtQuick 2.5
Image {
source: qsTr('image://sprite/sprites/hugh.png,%1').arg(_sceneItemComponent ? _sceneItemComponent.facingDirection : 0)
}
where facingDirection is the frame index to use. There are also other arguments you can pass that are mentioned in the .cpp file.
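For completeness, a minimal sketch of where that registration might live (assuming a QQmlApplicationEngine and a main.qml in the resources; the engine takes ownership of the provider):

#include <QGuiApplication>
#include <QQmlApplicationEngine>
#include "SpriteImageProvider.h"

int main(int argc, char *argv[])
{
    QGuiApplication app(argc, argv);
    QQmlApplicationEngine engine;
    // "sprite" becomes the scheme used in image://sprite/... source URLs.
    engine.addImageProvider(QLatin1String("sprite"), new SpriteImageProvider);
    engine.load(QUrl(QStringLiteral("qrc:/main.qml")));
    return app.exec();
}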
I've followed iFrameExtractor to successfully stream RTSP in my Swift project. This project also has a recording function; it basically uses avformat_write_header,
av_interleaved_write_frame and av_write_trailer to save the RTSP source into an mp4 file.
When I run this project on my device, the RTSP streaming works fine, but the recording function always generates a blank mp4 file with no image or sound.
Could anyone tell me which step I'm missing?
I'm using an iPhone 5 with iOS 9.1 and Xcode 7.1.1.
FFmpeg is version 2.8.3, compiled following the CompilationGuide – FFmpeg.
Following is the sample code in this project.
The function that generates every frame:
-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;
while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// 20130525 albert.liao modified start
// Initialize a new format context for writing file
if(veVideoRecordState!=eH264RecIdle)
{
switch(veVideoRecordState)
{
case eH264RecInit:
{
if ( !pFormatCtx_Record )
{
int bFlag = 0;
//NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";
const char *file = [videoPath UTF8String];
pFormatCtx_Record = avformat_alloc_context();
bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );
if(bFlag==true)
{
veVideoRecordState = eH264RecActive;
fprintf(stderr, "h264_file_create success\n");
}
else
{
veVideoRecordState = eH264RecIdle;
fprintf(stderr, "h264_file_create error\n");
}
}
}
//break;
case eH264RecActive:
{
if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
{
bFirstIFrame=true;
vPTS = packet.pts ;
vDTS = packet.dts ;
#if 0
NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
[pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
[NSTimer scheduledTimerWithTimeInterval:5.0//2.0
target:self
selector:@selector(StopRecording:)
userInfo:nil
repeats:NO];
#endif
}
// Record audio when 1st i-Frame is obtained
if(bFirstIFrame==true)
{
if ( pFormatCtx_Record )
{
#if PTS_DTS_IS_CORRECT==1
packet.pts = packet.pts - vPTS;
packet.dts = packet.dts - vDTS;
#endif
h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
break;
case eH264RecClose:
{
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
#if 0
// 20130607 Test
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
{
ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
{
[library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
if (error) {
// TODO: error handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum error");
} else {
// TODO: success handling
NSLog(#"writeVideoAtPathToSavedPhotosAlbum success");
}
}];
}
[library release];
});
#endif
vPTS = 0;
vDTS = 0;
vAudioPTS = 0;
vAudioDTS = 0;
pFormatCtx_Record = NULL;
NSLog(#"h264_file_close() is finished");
}
else
{
NSLog(#"fc no exist");
}
bFirstIFrame = false;
veVideoRecordState = eH264RecIdle;
}
break;
default:
if ( pFormatCtx_Record )
{
h264_file_close(pFormatCtx_Record);
pFormatCtx_Record = NULL;
}
NSLog(#"[ERROR] unexpected veVideoRecordState!!");
veVideoRecordState = eH264RecIdle;
break;
}
}
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
}
else if(packet.stream_index==audioStream)
{
// 20131024 albert.liao modfied start
static int vPktCount=0;
BOOL bIsAACADTS = FALSE;
int ret = 0;
if(aPlayer.vAACType == eAAC_UNDEFINED)
{
tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
}
@synchronized(aPlayer)
{
if(aPlayer==nil)
{
aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
NSLog(@"aPlayer initAudio");
if(bIsAACADTS)
{
aPlayer.vAACType = eAAC_ADTS;
//NSLog(@"is ADTS AAC");
}
}
else
{
if(vPktCount<5) // The voice is listened once image is rendered
{
vPktCount++;
}
else
{
if([aPlayer getStatus]!=eAudioRunning)
{
dispatch_async(dispatch_get_main_queue(), ^(void) {
@synchronized(aPlayer)
{
NSLog(@"aPlayer start play");
[aPlayer Play];
}
});
}
}
}
};
@synchronized(aPlayer)
{
int ret = 0;
ret = [aPlayer putAVPacket:&packet];
if(ret <= 0)
NSLog(@"Put Audio Packet Error!!");
}
// 20131024 albert.liao modfied end
if(bFirstIFrame==true)
{
switch(veVideoRecordState)
{
case eH264RecActive:
{
if ( pFormatCtx_Record )
{
h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);
}
else
{
NSLog(#"pFormatCtx_Record no exist");
}
}
}
}
}
else
{
//fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",\
packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
}
// 20130525 albert.liao modified end
}
return frameFinished!=0;
}
avformat_write_header:
int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx,AVCodecContext *pAudioCodecCtx, double fps, void *p, int len )
{
int vRet=0;
AVOutputFormat *of=NULL;
AVStream *pst=NULL;
AVCodecContext *pcc=NULL, *pAudioOutputCodecContext=NULL;
avcodec_register_all();
av_register_all();
av_log_set_level(AV_LOG_VERBOSE);
if(!pFilePath)
{
fprintf(stderr, "FilePath no exist");
return -1;
}
if(!fc)
{
fprintf(stderr, "AVFormatContext no exist");
return -1;
}
fprintf(stderr, "file=%s\n",pFilePath);
// Create container
of = av_guess_format( 0, pFilePath, 0 );
fc->oformat = of;
strcpy( fc->filename, pFilePath );
// Add video stream
pst = avformat_new_stream( fc, 0 );
vVideoStreamIdx = pst->index;
fprintf(stderr,"Video Stream:%d",vVideoStreamIdx);
pcc = pst->codec;
avcodec_get_context_defaults3( pcc, AVMEDIA_TYPE_VIDEO );
// Save the stream as origin setting without convert
pcc->codec_type = pCodecCtx->codec_type;
pcc->codec_id = pCodecCtx->codec_id;
pcc->bit_rate = pCodecCtx->bit_rate;
pcc->width = pCodecCtx->width;
pcc->height = pCodecCtx->height;
if(fps==0)
{
double fps=0.0;
AVRational pTimeBase;
pTimeBase.num = pCodecCtx->time_base.num;
pTimeBase.den = pCodecCtx->time_base.den;
fps = 1.0/ av_q2d(pCodecCtx->time_base)/ FFMAX(pCodecCtx->ticks_per_frame, 1);
fprintf(stderr,"fps_method(tbc): 1/av_q2d()=%g",fps);
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
else
{
pcc->time_base.num = 1;
pcc->time_base.den = fps;
}
// reference ffmpeg\libavformat\utils.c
// For SPS and PPS in avcC container
pcc->extradata = malloc(sizeof(uint8_t)*pCodecCtx->extradata_size);
memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
pcc->extradata_size = pCodecCtx->extradata_size;
// For Audio stream
if(pAudioCodecCtx)
{
AVCodec *pAudioCodec=NULL;
AVStream *pst2=NULL;
pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
// Add audio stream
pst2 = avformat_new_stream( fc, pAudioCodec );
vAudioStreamIdx = pst2->index;
pAudioOutputCodecContext = pst2->codec;
avcodec_get_context_defaults3( pAudioOutputCodecContext, pAudioCodec );
fprintf(stderr,"Audio Stream:%d",vAudioStreamIdx);
fprintf(stderr,"pAudioCodecCtx->bits_per_coded_sample=%d",pAudioCodecCtx->bits_per_coded_sample);
pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;
// Copy the codec attributes
pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
pAudioOutputCodecContext->bit_rate = 12000;//pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
//FF_PROFILE_AAC_LOW;
// pAudioCodecCtx->bit_rate;
// AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;//pAudioCodecCtx->sample_fmt;
pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
//pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;
pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;
pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
pAudioOutputCodecContext->frame_size = 1024;
fprintf(stderr,"profile:%d, sample_rate:%d, channles:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
fprintf(stderr, "\ncould not open codec\n");
}
av_dict_free(&opts);
#if 0
// For Audio, this part is no need
if(pAudioCodecCtx->extradata_size!=0)
{
NSLog(#"extradata_size !=0");
pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t)*pAudioCodecCtx->extradata_size);
memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
}
else
{
// For WMA test only
pAudioOutputCodecContext->extradata_size = 0;
NSLog(#"extradata_size ==0");
}
#endif
}
if(fc->oformat->flags & AVFMT_GLOBALHEADER)
{
pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if ( !( fc->oformat->flags & AVFMT_NOFILE ) )
{
vRet = avio_open( &fc->pb, fc->filename, AVIO_FLAG_WRITE );
if(vRet!=0)
{
fprintf(stderr,"avio_open(%s) error", fc->filename);
}
}
// dump format in console
av_dump_format(fc, 0, pFilePath, 1);
vRet = avformat_write_header( fc, NULL );
if(vRet==0)
return 1;
else
return 0;
}
av_interleaved_write_frame:
void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void* p, int len, int64_t dts, int64_t pts )
{
AVStream *pst = NULL;
AVPacket pkt;
if ( 0 > vVideoStreamIdx )
return;
// may be audio or video
pst = fc->streams[ vStreamIdx ];
// Init packet
av_init_packet( &pkt );
if(vStreamIdx ==vVideoStreamIdx)
{
pkt.flags |= ( 0 >= getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
//pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = pst->index;
pkt.data = (uint8_t*)p;
pkt.size = len;
pkt.dts = AV_NOPTS_VALUE;
pkt.pts = AV_NOPTS_VALUE;
// TODO: mark or unmark the log
//fprintf(stderr, "dts=%lld, pts=%lld\n",dts,pts);
// av_write_frame( fc, &pkt );
}
av_interleaved_write_frame( fc, &pkt );
}
av_write_trailer:
void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
return;
av_write_trailer( fc );
if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
avio_close( fc->pb );
av_free( fc );
}
Thanks.
It looks like you're using the same AVFormatContext for both the input and output?
In the line
pst = fc->streams[ vStreamIdx ];
You're assigning the AVStream* from the AVFormatContext connected to your input (the RTSP stream), but then later on you're trying to write the packet back to the same context with av_interleaved_write_frame( fc, &pkt );. I kind of think of a context as a file, which has helped me navigate this type of thing better. I do something identical to what you're doing (not iOS though), where I use a separate AVFormatContext for each of the input (RTSP stream) and the output (mp4 file). If I'm correct, I think all you need to do is initialize a second AVFormatContext for the output and set it up properly.
The following code (without error checking everything) is what I do to take an AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context that I had associated with the RTSP stream, and write from one to the other. This is after I have fetched a packet, etc., which in your case it looks like you're already doing (I just take the packet from av_read_frame and re-package it).
This is code that could be in your write-frame function (but it also includes the writing of the header).
AVFormatContext * output_format_context;
AVStream * in_stream_2;
AVStream * out_stream_2;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
output_format = output_format_context->oformat;
// Loop through all streams
for (i = 0; i < input_format_context->nb_streams; i++) {
// Create a pointer to the input stream that was allocated earlier in the code
AVStream *in_stream = input_format_context->streams[i];
// Create a pointer to a new stream that will be part of the output
AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
// Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
out_stream->time_base = in_stream->time_base;
// This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
// I don't remember what this is for :)
out_stream->codec->codec_tag = 0;
// This just sets a flag from the format context to the stream relating to the header
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check NOFILE flag and open the output file context (previously the output file was associated with the format context, now it is actually opened.
if (!(output_format->flags & AVFMT_NOFILE))
avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context,NULL);
// Re-getting the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescaling pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts so this just protects that first one for my purposes. You probably don't need.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write header, close/cleanup... etc
// ....
This code is fairly bare-bones and doesn't include the setup (which it sounds like you're doing correctly anyway). I would also imagine this code could be cleaned up and tweaked for your purposes, but it works for me to re-write the RTSP stream into a file (in my case many files, but that code is not shown).
The code is C code, so you might need to make minor tweaks for Swift compatibility (for some of the library function calls, maybe). I think overall it should be compatible, though.
Hopefully this helps point you in the right direction. This was cobbled together thanks to several sample-code sources (I don't remember where), along with warning prompts from the libraries themselves.