I've been learning Vulkan following vulkan-tutorial, and right now I'm at the Texture mapping part. I'm loading an image and uploading it to a host-visible staging buffer, but I'm having trouble understanding the layout transitions and barriers.
Consider this (pseudo)code for loading and transitioning an image (inspired by this), which will be sampled in a fragment shader:
auto texture = loadTexture(filePath);
auto stagingBuffer = createStagingBuffer(texture.pixels, texture.size);
// Create image with:
// usage - VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT
// properties - VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
auto imageBuffer = createImage();
// -- begin single usage command buffer --
auto cb = beginCommandBuffer();
VkImageMemoryBarrier preCopyBarrier {
// ...
.image = imageBuffer.image,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
// ...
};
// PipelineBarrier (preCopyBarrier):
// srcStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
// dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT
// imageMemoryBarrier = &preCopyBarrier
vkCmdPipelineBarrier(cb, ...);
// Copy stagingBuffer.buffer to imageBuffer.image
// dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
vkCmdCopyBufferToImage(cb, ...);
VkImageMemoryBarrier postCopyBarrier {
// ...
.image = imageBuffer.image,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
// ...
};
// PipelineBarrier (postCopyBarrier):
// srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT
// dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
// imageMemoryBarrier = &postCopyBarrier
vkCmdPipelineBarrier(cb, ...)
endAndSubmitCommandBuffer(cb);
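For reference, a minimal sketch of what the elided arguments might look like once filled in (my own fill-in, not from the tutorial; it assumes a single-mip, single-layer color image and reuses the masks, layouts, and stage comments from preCopyBarrier above):
// Hypothetical fill-in of the remaining barrier fields and the elided call.
preCopyBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
preCopyBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
preCopyBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
preCopyBarrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(
    cb,
    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // srcStageMask
    VK_PIPELINE_STAGE_TRANSFER_BIT,    // dstStageMask
    0,                                 // dependencyFlags
    0, nullptr,                        // global memory barriers
    0, nullptr,                        // buffer memory barriers
    1, &preCopyBarrier);               // image memory barriers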
The preCopyBarrier is there because of the vkCmdCopyBufferToImage(...) command, and it will be "used"/"activated" only once, during that command(?).
The postCopyBarrier is there because the image will be sampled in the fragment shader, so the layout transition
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL -> VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
has to happen every single time a frame is rendered(? Please correct me if I'm wrong).
But (assuming I'm correct, which I'm probably not) I'm having trouble wrapping my head around the fact that I'm creating a preCopyBarrier that will be used only once and a postCopyBarrier that will be used continuously. If I were to load, say, 200 textures, I'd have a bunch of single-use preCopyBarriers lying around. Isn't this a... waste?
This might be a stupid question and I'm probably missing/misunderstanding something important, but I feel like I shouldn't move on without understanding this concept correctly.
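A detail relevant here, shown as a sketch (the images vector is hypothetical, everything else follows the core API): a VkImageMemoryBarrier is a transient CPU-side struct that is consumed when vkCmdPipelineBarrier records it, and the command accepts an array of image barriers, so the pre-copy transitions for many textures could be batched into a single call:
// Batching the pre-copy transition for several images into one call (sketch).
std::vector<VkImageMemoryBarrier> barriers;
for (VkImage img : images) { // 'images': hypothetical vector of freshly created VkImages
    VkImageMemoryBarrier b{};
    b.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    b.srcAccessMask = 0;
    b.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    b.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    b.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    b.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    b.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    b.image = img;
    b.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    barriers.push_back(b);
}
vkCmdPipelineBarrier(cb,
    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
    0, nullptr, 0, nullptr,
    static_cast<uint32_t>(barriers.size()), barriers.data());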
Related
There are two ways to copy data to an image (using a staging buffer or not). In the first way, using a staging buffer, it works correctly when the image format is VK_FORMAT_R8G8B8A8_UNORM or VK_FORMAT_R8G8B8_UNORM. In the way that does not use a staging buffer, VK_FORMAT_R8G8B8A8_UNORM works well, but after changing the format to VK_FORMAT_R8G8B8_UNORM the sampled result is not correct. The source data has been verified to be correct for both image formats.
The code used is from https://github.com/SaschaWillems/Vulkan/blob/master/examples/texture/texture.cpp:
if (0 /*useStaging*/) {
// (memAllocInfo, memReqs, format, ktxTexture, ktxTextureData and ktxTextureSize
//  are declared earlier in the full example)
// Copy data to an optimal tiled image
// This loads the texture data into a host local buffer that is copied to the optimal tiled image on the device
// Create a host-visible staging buffer that contains the raw image data
// This buffer will be the data source for copying texture data to the optimal tiled image on the device
VkBuffer stagingBuffer;
VkDeviceMemory stagingMemory;
VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo();
bufferCreateInfo.size = ktxTextureSize;
// This buffer is used as a transfer source for the buffer copy
bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, &stagingBuffer));
// Get memory requirements for the staging buffer (alignment, memory type bits)
vkGetBufferMemoryRequirements(device, stagingBuffer, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
// Get memory type index for a host visible buffer
memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &stagingMemory));
VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffer, stagingMemory, 0));
// Copy texture data into host local staging buffer
uint8_t *data;
VK_CHECK_RESULT(vkMapMemory(device, stagingMemory, 0, memReqs.size, 0, (void **)&data));
memcpy(data, ktxTextureData, ktxTextureSize);
vkUnmapMemory(device, stagingMemory);
// Setup buffer copy regions for each mip level
std::vector<VkBufferImageCopy> bufferCopyRegions;
for (uint32_t i = 0; i < texture.mipLevels; i++) {
// Calculate offset into staging buffer for the current mip level (queried from the KTX library)
ktx_size_t offset;
KTX_error_code ret = ktxTexture_GetImageOffset(ktxTexture, i, 0, 0, &offset);
assert(ret == KTX_SUCCESS);
// Setup a buffer image copy structure for the current mip level
VkBufferImageCopy bufferCopyRegion = {};
bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
bufferCopyRegion.imageSubresource.mipLevel = i;
bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
bufferCopyRegion.imageSubresource.layerCount = 1;
bufferCopyRegion.imageExtent.width = ktxTexture->baseWidth >> i;
bufferCopyRegion.imageExtent.height = ktxTexture->baseHeight >> i;
bufferCopyRegion.imageExtent.depth = 1;
bufferCopyRegion.bufferOffset = offset;
bufferCopyRegions.push_back(bufferCopyRegion);
}
// Create optimal tiled target image on the device
VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = format;
imageCreateInfo.mipLevels = texture.mipLevels;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
// Set initial layout of the image to undefined
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.extent = { texture.width, texture.height, 1 };
imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &texture.image));
vkGetImageMemoryRequirements(device, texture.image, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &texture.deviceMemory));
VK_CHECK_RESULT(vkBindImageMemory(device, texture.image, texture.deviceMemory, 0));
VkCommandBuffer copyCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
// Image memory barriers for the texture image
// The sub resource range describes the regions of the image that will be transitioned using the memory barriers below
VkImageSubresourceRange subresourceRange = {};
// Image only contains color data
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Start at first mip level
subresourceRange.baseMipLevel = 0;
// We will transition on all mip levels
subresourceRange.levelCount = texture.mipLevels;
// The 2D texture only has one layer
subresourceRange.layerCount = 1;
// Transition the texture image layout to transfer target, so we can safely copy our buffer data to it.
VkImageMemoryBarrier imageMemoryBarrier = vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.image = texture.image;
imageMemoryBarrier.subresourceRange = subresourceRange;
imageMemoryBarrier.srcAccessMask = 0;
imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
// Insert a memory dependency at the proper pipeline stages that will execute the image layout transition
// Source pipeline stage is host write/read execution (VK_PIPELINE_STAGE_HOST_BIT)
// Destination pipeline stage is copy command execution (VK_PIPELINE_STAGE_TRANSFER_BIT)
vkCmdPipelineBarrier(
copyCmd,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
// Copy mip levels from staging buffer
vkCmdCopyBufferToImage(
copyCmd,
stagingBuffer,
texture.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
static_cast<uint32_t>(bufferCopyRegions.size()),
bufferCopyRegions.data());
// Once the data has been uploaded, we transition the texture image to the shader read layout, so it can be sampled from
imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Insert a memory dependency at the proper pipeline stages that will execute the image layout transition
// Source pipeline stage is copy command execution (VK_PIPELINE_STAGE_TRANSFER_BIT)
// Destination pipeline stage fragment shader access (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
vkCmdPipelineBarrier(
copyCmd,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
// Store current layout for later reuse
texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
vulkanDevice->flushCommandBuffer(copyCmd, queue, true);
// Clean up staging resources
vkFreeMemory(device, stagingMemory, nullptr);
vkDestroyBuffer(device, stagingBuffer, nullptr);
} else {
// Copy data to a linear tiled image
VkImage mappableImage;
VkDeviceMemory mappableMemory;
// Load mip map level 0 to linear tiling image
VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = format;
imageCreateInfo.mipLevels = 1;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
imageCreateInfo.extent = { texture.width, texture.height, 1 };
VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &mappableImage));
// Get memory requirements for this image like size and alignment
vkGetImageMemoryRequirements(device, mappableImage, &memReqs);
// Set memory allocation size to required memory size
memAllocInfo.allocationSize = memReqs.size;
// Get memory type that can be mapped to host memory
memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &mappableMemory));
VK_CHECK_RESULT(vkBindImageMemory(device, mappableImage, mappableMemory, 0));
// Map image memory
void *data;
VK_CHECK_RESULT(vkMapMemory(device, mappableMemory, 0, memReqs.size, 0, &data));
// Copy image data of the first mip level into memory
memcpy(data, ktxTextureData, memReqs.size);
vkUnmapMemory(device, mappableMemory);
// Linear tiled images don't need to be staged and can be directly used as textures
texture.image = mappableImage;
texture.deviceMemory = mappableMemory;
texture.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Setup image memory barrier transfer image to shader read layout
VkCommandBuffer copyCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
// The sub resource range describes the regions of the image we will transition
VkImageSubresourceRange subresourceRange = {};
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseMipLevel = 0;
subresourceRange.levelCount = 1;
subresourceRange.layerCount = 1;
// Transition the texture image layout to shader read, so it can be sampled from
VkImageMemoryBarrier imageMemoryBarrier = vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.image = texture.image;
imageMemoryBarrier.subresourceRange = subresourceRange;
imageMemoryBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Insert a memory dependency at the proper pipeline stages that will execute the image layout transition
// Source pipeline stage is host write/read execution (VK_PIPELINE_STAGE_HOST_BIT)
// Destination pipeline stage fragment shader access (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
vkCmdPipelineBarrier(
copyCmd,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
vulkanDevice->flushCommandBuffer(copyCmd, queue, true);
}
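One avenue worth checking for the VK_FORMAT_R8G8B8_UNORM failure in the linear-tiling path (an assumption on my part, not something verified on the asker's device): three-component formats frequently lack linear-tiling sampling support, and even when supported, a linear image's rows can be padded, so the single memcpy of tightly packed ktxTextureData above can mis-address texels. Both can be probed with core calls; physicalDevice is assumed to be the VkPhysicalDevice in use:
// Sketch: verify the format supports what the non-staging path relies on.
VkFormatProperties formatProps;
vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProps);
if (!(formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
    // VK_FORMAT_R8G8B8_UNORM commonly lacks this; fall back to the staging
    // path with a widely supported format such as VK_FORMAT_R8G8B8A8_UNORM.
}
// Sketch: query the real row pitch instead of assuming tight packing.
VkImageSubresource subresource{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 0 };
VkSubresourceLayout layout;
vkGetImageSubresourceLayout(device, mappableImage, &subresource, &layout);
// Copy row by row, advancing by layout.rowPitch on the image side.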
I am attempting to calculate some statistics for pixel values using OpenLayers 6.3.1 & I am having an issue iterating over all the pixels. I have read the docs for the pixels array that gets passed to the operation callback, and they state:
For pixel type operations, the function will be called with an array of pixels, where each pixel is an array of four numbers ([r, g, b, a]) in the range of 0 - 255. It should return a single pixel array.
I have taken this to mean that the array passed contains all the pixels but everything I do seems to prove that I only get the current pixel to work on.
if(this.rasterSource == null) {
this.rasterSource = new Raster({
sources: [this.imageLayer],
operation: function (pixels, data) {
data['originalPixels'] = pixels;
if(!isSetUp) {
// originalPixels = pixels as number[][];
// const originalPixels = Array.from(pixels as number[][]);
// let originals = generateOriginalHistograms(pixels as number[][]);
isSetUp = true;
}
// console.log(pixels[0]);
let pixel = pixels[0];
pixel[data['channel']] = data['value'];
return pixel;
},
lib: {
isSetUp: isSetUp,
numBins: numBins,
// originalPixels: originalPixels,
// originalRed: originalRed,
// originalGreen: originalGreen,
// originalBlue: originalBlue,
generateOriginalHistograms: generateOriginalHistograms,
}
});
this.rasterSource.on('beforeoperations', function(event) {
event.data.channel = 0;
event.data.value = 255;
});
this.rasterSource.on('afteroperations', function(event) {
console.debug("After Operations");
});
}
I have realised that I cannot pass arrays through the lib object so I have had to stop attempting that. These are the declarations I am currently using:
const numBins = 256;
var isSetUp: boolean = false;
function generateOriginalHistograms(pixels: number[][]) {
let originalRed = new Array(numBins).fill(0);
let originalGreen = new Array(numBins).fill(0);
let originalBlue = new Array(numBins).fill(0);
// NOTE: loops over numBins (256) entries, which assumes pixels contains at least that many pixels
for(let i = 0; i < numBins; ++i) {
originalRed[Math.floor(pixels[i][0])]++
originalGreen[Math.floor(pixels[i][1])]++;
originalBlue[Math.floor(pixels[i][2])]++;
}
return {red: originalRed, blue: originalBlue, green: originalGreen};
}
& they are declared outside of the current Angular component that I am writing this in. I did have another question on this, but I have since realised that I was way off in what I could and couldn't use here.
This now runs and, as currently commented, will tint the image red. But data['originalPixels'] = pixels; only ever produces one pixel. Can anyone tell me why this is & what I need to do to access the whole pixel array? I have tried to slice & spread the array to no avail. If I uncomment the line // let originals = generateOriginalHistograms(pixels as number[][]); I get an error:
Uncaught TypeError: Cannot read properties of undefined (reading '0')
generateOriginalHistograms # blob:http://localhos…a7fa-b5a410582c06:6
(anonymous) # blob:http://localhos…7fa-b5a410582c06:76
(anonymous) # blob:http://localhos…7fa-b5a410582c06:62
(anonymous) # blob:http://localhos…7fa-b5a410582c06:83
& if I uncomment the line // console.log(pixels[0]); I get all the pixel values streaming in the console but quite slowly.
The answer appears to be to change the operationType to 'image' and work with the ImageData object.
this.rasterSource = new Raster({
sources: [this.imageLayer],
operationType: "image",
operation: function (pixels, data) {
let imageData = pixels[0] as ImageData;
...
I now have no issues calculating the stats I need.
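For completeness, a sketch of that 'image' variant (the operation body and the redHistogram key are my own illustration; only operationType: 'image' and the ImageData handling come from the answer above). ImageData.data is a flat Uint8ClampedArray laid out as r, g, b, a, r, g, b, a, ...:
this.rasterSource = new Raster({
  sources: [this.imageLayer],
  operationType: 'image',
  operation: function (pixels, data) {
    const imageData = pixels[0] as ImageData;
    const d = imageData.data; // every pixel of the frame, 4 bytes per pixel
    const red = new Array(256).fill(0);
    for (let i = 0; i < d.length; i += 4) {
      red[d[i]]++; // red-channel histogram over the whole image
    }
    data['redHistogram'] = red; // hypothetical key, read back in 'afteroperations'
    return imageData;
  },
});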
This occurs in my attempts to render with Metal using a CAMetalLayer, and in a lot of 'Metal By Example' sample code I've downloaded. The problem is with the texture, I guess. Here's some code; I can't provide all of it, but I'll try to provide the most relevant parts. It doesn't accept the texture descriptor, printing this to the console:
failed assertion `MTLTextureDescriptor: Depth, Stencil, DepthStencil, and Multisample textures must be allocated with the MTLStorageModePrivate or MTLStorageModeMemoryless storage mode.'
- (void)buildDepthTexture
{
CGSize drawableSize = self.layer.drawableSize;
MTLTextureDescriptor *descriptor = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatDepth32Float
width:drawableSize.width
height:drawableSize.height
mipmapped:NO];
self.depthTexture = [self.device newTextureWithDescriptor:descriptor]; // Thread 1: signal SIGABRT
[self.depthTexture setLabel:@"Depth Texture"];
}
Again, this is sample code that presumably worked, but no longer does. So I'm like, OK, let's allocate it with the private storage mode or some junk: descriptor.storageMode = MTLStorageModePrivate;
But when I do that, the render pass descriptor can't be created in draw.
failed assertion `Texture at depthAttachment has usage (0x01) which doesn't specify MTLTextureUsageRenderTarget (0x04)'
MTLRenderPassDescriptor *renderPass = [self newRenderPassWithColorAttachmentTexture:[drawable texture]];
id<MTLCommandBuffer> commandBuffer = [self.commandQueue commandBuffer];
id<MTLRenderCommandEncoder> commandEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPass]; //Thread 1: signal SIGABRT
Here's the code for newRenderPassWithColorAttachmentTexture.
- (MTLRenderPassDescriptor *)newRenderPassWithColorAttachmentTexture:(id<MTLTexture>)texture
{
MTLRenderPassDescriptor *renderPass = [MTLRenderPassDescriptor new];
renderPass.colorAttachments[0].texture = texture;
renderPass.colorAttachments[0].loadAction = MTLLoadActionClear;
renderPass.colorAttachments[0].storeAction = MTLStoreActionStore;
renderPass.colorAttachments[0].clearColor = MBEClearColor;
renderPass.depthAttachment.texture = self.depthTexture;
renderPass.depthAttachment.loadAction = MTLLoadActionClear;
renderPass.depthAttachment.storeAction = MTLStoreActionStore;
renderPass.depthAttachment.clearDepth = 1.0;
return renderPass;
}
So basically, it seems two different stages of rendering require two mutually exclusive conditions to hold. If one works, the other doesn't, and vice versa. Seems impossible, seriously, what am I supposed to do? What gives?
You should provide a texture usage description:
- (void)buildDepthTexture
{
CGSize drawableSize = self.layer.drawableSize;
MTLTextureDescriptor *descriptor = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatDepth32Float
width:drawableSize.width
height:drawableSize.height
mipmapped:NO];
descriptor.storageMode = MTLStorageModePrivate;
descriptor.usage = MTLTextureUsageRenderTarget | MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite;
self.depthTexture = [self.device newTextureWithDescriptor:descriptor];
[self.depthTexture setLabel:@"Depth Texture"];
}
I created an SCNNode and added an audio source to it.
It is a mono audio file, and everything is set up correctly.
It is working as spatial audio; that's not the problem.
The problem is that as I get closer or farther away, the volume barely changes. I know it changes if I get very, very far away, but it's nothing like Apple demonstrated here:
https://youtu.be/d9kb1LfNNU4?t=23
In some other games I see the audio volume really changing from one step of distance.
With mine, after one step you can't even tell the volume changed. You need at least 4 steps.
Anyone has any clue why?
Code below:
SCNNode *audioNode = [[SCNNode alloc] init];
SCNAudioSource *audioSource = [[SCNAudioSource alloc] initWithFileNamed:audioFileName];
audioSource.loops = YES;
[audioSource load];
audioSource.volume = 0.05; // <-- i used different values. won't change much either
audioSource.positional = YES;
//audioSource.shouldStream = NO; // <-- makes no difference
[audioNode addAudioPlayer:[SCNAudioPlayer audioPlayerWithSource:audioSource]];
[audioNode runAction:[SCNAction playAudioSource:audioSource waitForCompletion:NO] completionHandler:nil];
[massNode addChildNode:audioNode];
Maybe the scale of the nodes?
The whole scene is around 4 feet in size.
When I add an object I usually scale it to 0.005 (otherwise it gets way too big).
But I also tried with one that was already the right size from the .scn file.
It shouldn't affect anything though, since the result is a coffee-table-sized scene and I can see the objects alright.
Updated.
Here's working code for controlling the sound's decay (works on iOS and macOS):
import AVFoundation
import ARKit
class ViewController: UIViewController, AVAudioMixing {
@IBOutlet var sceneView: SCNView!
// @IBOutlet var sceneView: ARSCNView!
func destination(forMixer mixer: AVAudioNode,
bus: AVAudioNodeBus) -> AVAudioMixingDestination? {
return nil
}
var volume: Float = 0.0
var pan: Float = 0.0
var sourceMode: AVAudio3DMixingSourceMode = .bypass
var pointSourceInHeadMode: AVAudio3DMixingPointSourceInHeadMode = .bypass
var renderingAlgorithm = AVAudio3DMixingRenderingAlgorithm.sphericalHead
var rate: Float = 1.2
var reverbBlend: Float = 40.0
var obstruction: Float = -100.0
var occlusion: Float = -100.0
var position = AVAudio3DPoint(x: 0, y: 0, z: 10)
let audioNode = SCNNode()
override func viewDidLoad() {
super.viewDidLoad()
let myScene = SCNScene()
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3(0, 0, 0)
myScene.rootNode.addChildNode(cameraNode)
// let sceneView = view as! SCNView
sceneView.scene = myScene
sceneView.backgroundColor = UIColor.orange
let myPath = Bundle.main.path(forResource: "Mono_Audio", ofType: "mp3")
let myURL = URL(fileURLWithPath: myPath!)
let mySource = SCNAudioSource(url: myURL)!
mySource.loops = true
mySource.isPositional = true // Positional Audio
mySource.shouldStream = false // FALSE for Positional Audio
mySource.volume = volume
mySource.reverbBlend = reverbBlend
mySource.rate = rate
mySource.load()
let player = SCNAudioPlayer(source: mySource)
let sphere: SCNGeometry = SCNSphere(radius: 0.1)
let sphereNode = SCNNode(geometry: sphere)
sphereNode.addChildNode(audioNode)
myScene.rootNode.addChildNode(sphereNode)
audioNode.addAudioPlayer(player)
sceneView.audioEnvironmentNode.distanceAttenuationParameters.maximumDistance = 2
sceneView.audioEnvironmentNode.distanceAttenuationParameters.referenceDistance = 0.1
sceneView.audioEnvironmentNode.renderingAlgorithm = .auto
// sceneView.audioEnvironmentNode.reverbParameters.enable = true
// sceneView.audioEnvironmentNode.reverbParameters.loadFactoryReverbPreset(.plate)
let hither = SCNAction.moveBy(x: 0, y: 0, z: 1, duration: 2)
let thither = SCNAction.moveBy(x: 0, y: 0, z: -1, duration: 2)
let sequence = SCNAction.sequence([hither, thither])
let loop = SCNAction.repeatForever(sequence)
sphereNode.runAction(loop)
}
}
And, yes, you're absolutely right – there are some obligatory settings.
But there are 7 of them:
use AVAudioMixing protocol with its stubs (properties and methods).
use MONO audio file.
use source.isPositional = true.
use source.shouldStream = false.
assign maximumDistance value to distanceAttenuationParameters property.
assign referenceDistance value to distanceAttenuationParameters property.
and the location of mySource.load() is very important in your code.
P.S. If the aforementioned tips didn't help you, use additional instance properties to make your sound even quieter via the attenuation graph, obstacles, and the orientation of the implicit listener:
var rolloffFactor: Float { get set } // attenuation's graph, default = 1
var obstruction: Float { get set } // default = 0.0
var occlusion: Float { get set } // default = 0.0
var listenerAngularOrientation: AVAudio3DAngularOrientation { get set } //(0,0,0)
It definitely works if you write it in Objective-C.
In this example the audioNode is 1 meter away from the listener.
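And a short sketch of where those P.S. properties live (assuming the same sceneView as above; rolloffFactor belongs to distanceAttenuationParameters, while listenerAngularOrientation sits on the environment node itself):
// Steeper attenuation curve plus an explicit listener orientation (sketch).
sceneView.audioEnvironmentNode.distanceAttenuationParameters.rolloffFactor = 2
sceneView.audioEnvironmentNode.listenerAngularOrientation =
    AVAudio3DAngularOrientation(yaw: 0, pitch: 0, roll: 0)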
If none of the above answers seem to work, try the following code:
sceneView.audioEnvironmentNode.reverbParameters.enable = true
And if even these barely seem to work, or if you want optimal performance, there is a property called level where you can set the level of the effect.
sceneView.audioEnvironmentNode.reverbParameters.level = 40
(the reverbParameters level ranges from -40 to 40)
I am working with EGL on an ARM GPU, using a pbuffer to do off-screen rendering. I follow the standard procedure described in the documentation to set everything up:
EGLDisplay display;
EGLConfig config;
EGLContext context;
EGLSurface surface;
EGLint num_config;
// assume I allocated both attrib lists somewhere
attribute_list[0] = EGL_SURFACE_TYPE;
attribute_list[1] = EGL_PBUFFER_BIT;
attribute_list[2] = EGL_RENDERABLE_TYPE;
attribute_list[3] = EGL_OPENGL_ES2_BIT;
attribute_list[4] = EGL_OPENGL_RED_SIZE;
attribute_list[5] = 8;
attribute_list[6] = EGL_OPENGL_GREEN_SIZE;
attribute_list[7] = 8;
attribute_list[8] = EGL_OPENGL_BLUE_SIZE;
attribute_list[9] = 8;
attribute_list[9] = EGL_OPENGL_ALPHA_SIZE;
attribute_list[10] = 8;
attribute_list[11] = EGL_OPENGL_DEPTH_SIZE;
attribute_list[12] = 8;
attribute_list[13] = EGL_NONE;
pbuffer_attribs[0] = EGL_WIDTH;
pbuffer_attribs[1] = 512;
pbuffer_attribs[2] = EGL_HEIGHT;
pbuffer_attribs[3] = 512;
pbuffer_attribs[4] = EGL_NONE;
/* get an EGL display connection */
display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
/* initialize the EGL display connection */
eglInitialize(display, NULL, NULL);
/* get an appropriate EGL frame buffer configuration */
eglChooseConfig(display, attribute_list, &config, 1, &num_config);
/* create an EGL rendering context */
context = eglCreateContext(display, config, EGL_NO_CONTEXT, NULL);
/* create the surface */
surface = eglCreatePbufferSurface(display, config, pbuffer_attribs);
/* connect the context to the surface */
eglMakeCurrent(display, surface, surface, context);
After this, my reads and writes should be associated with this offscreen pbuffer, correct? Does this pbuffer have an FBO, distinct from the default framebuffer, associated with it? The issue I am running into is that I get a GL_FRAMEBUFFER_UNDEFINED error when I try to glReadPixels. This error happens when:
GL_FRAMEBUFFER_UNDEFINED is returned if target is the default framebuffer, but the default framebuffer does not exist.
My reading of this error is that I am rendering to the default FBO and not to the pbuffer FBO. Is this interpretation correct? If so, what else do I need to do so I can read from and write to the pbuffer FBO?
If the above sequence completes successfully (without errors), then, yes, the offscreen pBuffer becomes the default framebuffer for the OpenGL ES context and all reads and writes will be associated with the pBuffer (unless a non-default FBO is bound).
It's worth checking that eglGetError() returns EGL_SUCCESS after each EGL call. The following part of your code listing looks suspicious:
attribute_list[9] = 8;
attribute_list[9] = EGL_OPENGL_ALPHA_SIZE;
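The answer cuts off here, but the quoted lines already show the likely bug: index 9 is written twice, so the blue-size value is overwritten by the alpha token and every entry after it falls out of the key/value pairing that eglChooseConfig expects. That typically makes eglChooseConfig fail with EGL_BAD_ATTRIBUTE and return no config, leaving the later EGL calls working on invalid handles, hence no default framebuffer and GL_FRAMEBUFFER_UNDEFINED. Note too that the EGL_OPENGL_*_SIZE names are not standard EGL tokens; the core ones are EGL_RED_SIZE and friends. A corrected sketch under those assumptions:
/* Corrected attribute lists (sketch). Standard token names; an 8-bit depth
   buffer is rarely supported, so 16 (or 24) is requested instead. */
EGLint attribute_list[] = {
    EGL_SURFACE_TYPE,    EGL_PBUFFER_BIT,
    EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
    EGL_RED_SIZE,        8,
    EGL_GREEN_SIZE,      8,
    EGL_BLUE_SIZE,       8,
    EGL_ALPHA_SIZE,      8,
    EGL_DEPTH_SIZE,      16,
    EGL_NONE
};
EGLint pbuffer_attribs[] = {
    EGL_WIDTH,  512,
    EGL_HEIGHT, 512,
    EGL_NONE
};
/* An OpenGL ES 2.0 context also needs the client version attribute: */
EGLint context_attribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
context = eglCreateContext(display, config, EGL_NO_CONTEXT, context_attribs);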