SDF in WebGPU. How do I modify the code to draw SDFs like on ShaderToy.com? - fragment-shader

I want to create a WebGPU version of Shadertoy,
but I can't prepare the code correctly.
How do I draw an SDF in a @fragment shader in WebGPU?
I set up the clip-space quad [-1,1, 1,1, -1,-1, 1,-1] for the canvas,
but what do I need to do next?
<!DOCTYPE html>
<title>SDF-WebGPU</title>
<canvas></canvas><script>'use strict';
const canvas = document.body.firstChild;
canvas.style = `
display: block;
image-rendering: pixelated;
background-color: #ccc;
user-select: none;
touch-action: none;
width: ${ canvas.width = 480 * devicePixelRatio, 480 }px;
height: ${ canvas.height = 360 * devicePixelRatio, 360 }px;
`;
const init = async function(){
const
context = canvas.getContext(`webgpu`),
format = navigator.gpu.getPreferredCanvasFormat(),
adapter = await navigator.gpu.requestAdapter(),
device = await adapter.requestDevice(),
Q = device.queue,
{VERTEX, COPY_DST} = GPUBufferUsage,
SPACE_B = new Float32Array([-1,1, 1,1, -1,-1, 1,-1]),
B = device.createBuffer({
label: `SPACE`,
size: SPACE_B.byteLength,
usage: VERTEX | COPY_DST
}),
P = device.createRenderPipeline({
layout: `auto`,
vertex: {
module: device.createShaderModule({
code: `@vertex
fn vSh(@location(0) p:vec2<f32>) -> @builtin(position) vec4<f32>{
return vec4<f32>(p,0,1); // (p[x,y],z,w)
}`
}),
entryPoint: `vSh`,
buffers: [{
arrayStride: 8, // 2*4 = 2 floats x 4 bytes
attributes: [{
shaderLocation: 0,
offset: 0,
format: `float32x2`
}]
}], // buffers
},
fragment: {
module: device.createShaderModule({
code: `@fragment
fn fSh() -> @location(0) vec4<f32>{
return vec4<f32>(.082,.263,.455,1);
}`
}),
entryPoint: `fSh`,
targets: [ {format} ]
},
primitive:{
topology: `triangle-strip`
}
}), // Pipeline
frame=()=>{
const
C = device.createCommandEncoder(),
R = C.beginRenderPass({
colorAttachments:[{
view: context.getCurrentTexture().createView(),
loadOp: `clear`,
storeOp: `store`
}]
});
R.setPipeline(P);
R.setVertexBuffer(0,B);
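// 4 vertices + triangle-strip topology = two triangles covering the clip-space quad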
R.draw(4);
R.end();
Q.submit([ C.finish() ])
}; // frame
context.configure({ device, format, alphaMode: `opaque` });
Q.writeBuffer(B,0, SPACE_B);
frame()
}() // init
</script>
I was only able to create a version without an SDF.
If you know any references about SDFs in WebGPU, please share them with me.
Thanks!

Shaders in WebGPU are written in WGSL instead of GLSL, but nearly every concept in GLSL has a similar feature in WGSL.
You'll either have to read the spec or look at examples to figure out how to translate from GLSL to WGSL, but it's not that hard :P
Here's a GLSL SDF shader:
// The MIT License
// Copyright © 2020 Inigo Quilez
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// Signed distance to a disk
// List of some other 2D distances: https://www.shadertoy.com/playlist/MXdSRf
//
// and iquilezles.org/articles/distfunctions2d
float sdCircle( in vec2 p, in float r )
{
return length(p)-r;
}
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
vec2 p = (2.0*fragCoord-iResolution.xy)/iResolution.y;
vec2 m = (2.0*iMouse.xy-iResolution.xy)/iResolution.y;
float d = sdCircle(p,0.5);
// coloring
vec3 col = (d>0.0) ? vec3(0.9,0.6,0.3) : vec3(0.65,0.85,1.0);
col *= 1.0 - exp(-6.0*abs(d));
col *= 0.8 + 0.2*cos(150.0*d);
col = mix( col, vec3(1.0), 1.0-smoothstep(0.0,0.01,abs(d)) );
if( iMouse.z>0.001 )
{
d = sdCircle(m,0.5);
col = mix(col, vec3(1.0,1.0,0.0), 1.0-smoothstep(0.0, 0.005, abs(length(p-m)-abs(d))-0.0025));
col = mix(col, vec3(1.0,1.0,0.0), 1.0-smoothstep(0.0, 0.005, length(p-m)-0.015));
}
fragColor = vec4(col,1.0);
}
Here it is translated to WGSL:
// The MIT License
// Copyright © 2020 Inigo Quilez
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// Signed distance to a disk
// List of some other 2D distances: https://www.shadertoy.com/playlist/MXdSRf
//
// and iquilezles.org/articles/distfunctions2d
fn sdCircle( p: vec2f, r: f32 ) -> f32
{
return length(p)-r;
}
struct Uniforms {
iResolution: vec3f,
iMouse: vec4f,
};
@group(0) @binding(0) var<uniform> u: Uniforms;
fn mainImage( fragColor: ptr<function, vec4f>, fragCoord: vec2f )
{
let p = (2.0*fragCoord-u.iResolution.xy)/u.iResolution.y;
let m = (2.0*u.iMouse.xy-u.iResolution.xy)/u.iResolution.y;
var d = sdCircle(p,0.5);
// coloring
var col = select(vec3(0.65,0.85,1.0), vec3(0.9,0.6,0.3), d>0.0);
col *= 1.0 - exp(-6.0*abs(d));
col *= 0.8 + 0.2*cos(150.0*d);
col = mix( col, vec3f(1.0), 1.0-smoothstep(0.0,0.01,abs(d)) );
if( u.iMouse.z>0.001 )
{
d = sdCircle(m,0.5);
col = mix(col, vec3(1.0,1.0,0.0), 1.0-smoothstep(0.0, 0.005, abs(length(p-m)-abs(d))-0.0025));
col = mix(col, vec3(1.0,1.0,0.0), 1.0-smoothstep(0.0, 0.005, length(p-m)-0.015));
}
*fragColor = vec4f(col,1.0);
}
I can't list all the differences, but here are a few easy-to-explain things:
In GLSL a function is returnType name(type1 arg1, type2 arg2) { ... }
In WGSL a function is fn name(arg1: type1, arg2: type2) -> returnType { ... }
In GLSL a variable is type nameOfVar
In WGSL a variable is var nameOfVar: type except that in WGSL you don't have to specify the type if WGSL can figure it out.
In other words, these are the same
var a: f32 = 123.0; // a's type is f32
var b = 123.0; // b's type is f32
Note: confusingly, var is like let in JavaScript. You can reassign it. let is like const in JavaScript. You can't change it after assignment.
GLSL has the ? operator as in v = condition ? t : f
WGSL has select as in v = select(f, t, condition)
types
GLSL | WGSL
------+------
float | f32
int | i32
vec4 | vec4f
ivec4 | vec4i
etc...
One more issue: in WebGL, gl_FragCoord.y goes from 0 at the bottom to the height of the canvas at the top. WebGPU's equivalent, @builtin(position), goes from 0 at the top to the height of the canvas at the bottom.
Here's a live version of that SDF shader. Drag the mouse on the image.
const code = `
// The MIT License
// Copyright © 2020 Inigo Quilez
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// Signed distance to a disk
// List of some other 2D distances: https://www.shadertoy.com/playlist/MXdSRf
//
// and iquilezles.org/articles/distfunctions2d
fn sdCircle( p: vec2f, r: f32 ) -> f32
{
return length(p)-r;
}
struct Uniforms {
iResolution: vec3f,
iMouse: vec4f,
};
@group(0) @binding(0) var<uniform> u: Uniforms;
fn mainImage( fragColor: ptr<function, vec4f>, fragCoord: vec2f )
{
let p = (2.0*fragCoord-u.iResolution.xy)/u.iResolution.y;
let m = (2.0*u.iMouse.xy-u.iResolution.xy)/u.iResolution.y;
var d = sdCircle(p,0.5);
// coloring
var col = select(vec3(0.65,0.85,1.0), vec3(0.9,0.6,0.3), d>0.0);
col *= 1.0 - exp(-6.0*abs(d));
col *= 0.8 + 0.2*cos(150.0*d);
col = mix( col, vec3f(1.0), 1.0-smoothstep(0.0,0.01,abs(d)) );
if( u.iMouse.z>0.001 )
{
d = sdCircle(m,0.5);
col = mix(col, vec3(1.0,1.0,0.0), 1.0-smoothstep(0.0, 0.005, abs(length(p-m)-abs(d))-0.0025));
col = mix(col, vec3(1.0,1.0,0.0), 1.0-smoothstep(0.0, 0.005, length(p-m)-0.015));
}
*fragColor = vec4f(col,1.0);
}
@vertex fn vs(
@builtin(vertex_index) VertexIndex : u32
) -> @builtin(position) vec4<f32> {
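// a single oversized triangle that covers the whole canvas in clip space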
var pos = array<vec2<f32>, 3>(
vec2(-1.0, -1.0),
vec2( 3.0, -1.0),
vec2(-1.0, 3.0)
);
return vec4(pos[VertexIndex], 0.0, 1.0);
}
@fragment fn fs(@builtin(position) fragCoord : vec4f) -> @location(0) vec4f {
var color = vec4f(0);
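// flip Y: @builtin(position) has y = 0 at the top, Shadertoy-style fragCoord has y = 0 at the bottom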
mainImage(&color, vec2f(fragCoord.x, u.iResolution.y - fragCoord.y));
return color;
}
`;
(async() => {
const adapter = await navigator.gpu?.requestAdapter();
const device = await adapter?.requestDevice();
if (!device) {
alert('need webgpu');
return;
}
const canvas = document.querySelector("canvas")
const context = canvas.getContext('webgpu');
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
alphaMode: 'opaque',
});
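// Uniform layout per WGSL struct rules: iResolution (vec3f) sits at offset 0 and is
// padded to 16 bytes, iMouse (vec4f) sits at offset 16, so the block is 32 bytes total.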
const uniformBufferSize = 32;
const uniformBuffer = device.createBuffer({
size: uniformBufferSize,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
const uniformArrayBuffer = new ArrayBuffer(uniformBufferSize);
const resolution = new Float32Array(uniformArrayBuffer, 0, 3);
const mouse = new Float32Array(uniformArrayBuffer, 16, 4);
const module = device.createShaderModule({code});
const pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
},
fragment: {
module,
entryPoint: 'fs',
targets: [{format: presentationFormat}],
}
});
const bindGroup = device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: uniformBuffer } },
],
});
function resizeToDisplaySize(device, canvas) {
const width = Math.min(device.limits.maxTextureDimension2D, canvas.clientWidth);
const height = Math.min(device.limits.maxTextureDimension2D, canvas.clientHeight);
const needResize = width !== canvas.width ||
height !== canvas.height;
if (needResize) {
canvas.width = width;
canvas.height = height;
}
return needResize;
}
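// Note: render() below resizes the canvas inline on every frame; calling
// resizeToDisplaySize there instead would only touch the canvas when its size changes.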
function render() {
const width = Math.min(device.limits.maxTextureDimension2D, canvas.clientWidth);
const height = Math.min(device.limits.maxTextureDimension2D, canvas.clientHeight);
canvas.width = width;
canvas.height = height;
resolution[0] = width;
resolution[1] = height;
device.queue.writeBuffer(uniformBuffer, 0, uniformArrayBuffer);
const encoder = device.createCommandEncoder();
const pass = encoder.beginRenderPass({
colorAttachments: [{
view: context.getCurrentTexture().createView(),
clearValue: [0, 0, 0, 0],
loadOp: 'clear',
storeOp: 'store',
}]
});
pass.setPipeline(pipeline);
pass.setBindGroup(0, bindGroup);
pass.draw(3);
pass.end();
device.queue.submit([encoder.finish()]);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas.addEventListener('mousemove', (e) => {
mouse[0] = e.offsetX;
mouse[1] = canvas.offsetHeight - e.offsetY - 1;
});
canvas.addEventListener('mousedown', _ => mouse[2] = 1);
canvas.addEventListener('mouseup', _ => mouse[2] = 0);
})();
html, body {
margin: 0;
height: 100%;
}
canvas {
width: 100%;
height: 100%;
display: block;
}
<canvas></canvas>

Related

How to get original pixel color before shader

I have an ImageLayer and a RasterSource which uses a shader to manipulate the colors of the image.
While listening to the map's pointermove event, I get the color of the pixel under the pointer, which is the color manipulated by the shader.
How can I get the original color before it was manipulated by the shader?
const extent = [0, 0, 1024, 968]; // the image extent in pixels.
const projection = new Projection({
code: 'xkcd-image',
units: 'pixels',
extent: extent,
});
let staticImage = new Static({ // from 'ol/source/ImageStatic'
attributions: '© xkcd',
url: "./assets/map.png",
projection: projection,
imageExtent: extent
});
let imageLayer = new ImageLayer({ // from 'ol/layer/Image'
source: new RasterSource({
sources: [staticImage],
operation: function (pixels, data) {
let p = pixels[0];
let grayscale = (p[0] + p[1] + p[2]) / 3;
p[0] = grayscale;
p[1] = grayscale;
p[2] = grayscale;
return p;
}
})
});
let map; // Map from 'ol'
map.on('pointermove', (evt) => {
let pixel = evt.pixel;
let color = imageLayer.getRenderer().getDataAtPixel(pixel, evt.framestate, 1);
// color is the one manipulated by the shader
});
More code here:
https://codesandbox.io/s/raster-original-pixel-3mejh9?file=/main.js
Note: because of security restrictions I've had to comment out the shader code which turns the colors gray
Which was adapted from this example:
https://openlayers.org/en/latest/examples/static-image.html
I found a workaround by adding a duplicate RasterLayer with a no-op operation. The trick is to add the layer just after the color-manipulated layer, but with an opacity of 0.005 (apparently the lowest possible value) so it's rendered but you don't see it. Then combine the grey color's alpha with the original color's RGB values:
let noopLayer = new ImageLayer({
opacity: 0.005,
source: new RasterSource({
sources: [
staticImage
],
operation: function (pixels, data) {
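// no-op: return the pixel untouched so this layer still carries the original colors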
let p = pixels[0];
return p;
},
}),
});
const map = new Map({
layers: [
rasterLayer,
noopLayer,
],
target: 'map',
view: new View({
projection: projection,
center: getCenter(extent),
zoom: 2,
maxZoom: 8,
}),
});
map.on('pointermove', (evt) => {
const pixel = evt.pixel;
let grey = rasterLayer.getRenderer().getDataAtPixel(pixel, evt.framestate, 1);
let noop = noopLayer.getRenderer().getDataAtPixel(pixel, evt.framestate, 1);
if (grey != null) {
// [228, 228, 228, 255]
console.log('grey:', grey[0], grey[1], grey[2], grey[3]);
// [255, 255, 170, 3]
console.log('noop:', noop[0], noop[1], noop[2], noop[3]);
// [255, 255, 170, 255]
console.log('original:', noop[0], noop[1], noop[2], grey[3]);
}
});

SwiftUI: How to manage dynamic rows/columns of Views?

I am finding my first SwiftUI challenge to be a tricky one. Given a set of playing cards, display them in a way that allows the user to see the full deck while using space efficiently. Here's a simplified example:
In this case 52 cards (Views) are presented, in order of 01 - 52. They are dynamically packed into the parent view such that there is enough spacing between them to allow the numbers to be visible.
The problem
If we change the shape of the window, the packing algorithm will pack them (correctly) into a different number of rows & columns. However, when the number of rows/columns change, the card Views are out of order (some are duplicated):
In the image above, notice how the top row is correct (01 - 26) but the second row starts at 12 and ends at 52. I expect this is because the second row originally contained 12 - 22 and those views were not updated.
Additional criteria: The number of cards and the order of those cards can change at runtime. Also, this app must be able to be run on Mac, where the window size can be dynamically adjusted to any shape (within reason.)
I understand that when using ForEach for indexing, one must use a constant but I must loop through a series of rows and columns, each of which can change. I have tried adding id: \.self, but this did not solve the problem. I ended up looping through the maximum possible number of rows/columns (to keep the loop constant) and simply skipped the indices that I didn't want. This is clearly wrong.
The other alternative would be to use arrays of Identifiable structures. I tried this, but wasn't able to figure out how to organize the data flow. Also, since the packing is dependent on the size of the parent View it would seem that the packing must be done inside the parent. How can the parent generate the data needed to fulfill the deterministic requirements of SwiftUI?
I'm willing to do the work to get this working, any help understanding how I should proceed would be greatly appreciated.
The code below is a fully working, simplified version. Sorry if it's still a bit large. I'm guessing the problem revolves around the use of the two ForEach loops (which are, admittedly, a bit janky.)
import SwiftUI
// This is a hacked-together simplified view of a card that meets all requirements for demonstration purposes
struct CardView: View {
public static let kVerticalCornerExposureRatio: CGFloat = 0.237
public static let kPhysicalAspect: CGFloat = 63.5 / 88.9
@State var faceCode: String
func bgColor(_ faceCode: String) -> Color {
let ascii = Character(String(faceCode.suffix(1))).asciiValue!
let r = (CGFloat(ascii) / 3).truncatingRemainder(dividingBy: 0.7)
let g = (CGFloat(ascii) / 17).truncatingRemainder(dividingBy: 0.9)
let b = (CGFloat(ascii) / 23).truncatingRemainder(dividingBy: 0.6)
return Color(.sRGB, red: r, green: g, blue: b, opacity: 1)
}
var body: some View {
GeometryReader { geometry in
RoundedRectangle(cornerRadius: 10)
.fill(bgColor(faceCode))
.cornerRadius(8)
.frame(width: geometry.size.height * CardView.kPhysicalAspect, height: geometry.size.height)
.aspectRatio(CardView.kPhysicalAspect, contentMode: .fit)
.overlay(Text(faceCode)
.font(.system(size: geometry.size.height * 0.1))
.padding(5)
, alignment: .topLeading)
.overlay(RoundedRectangle(cornerRadius: 10).stroke(lineWidth: 2))
}
}
}
// A single row of our fanned-out cards
struct RowView: View {
var cards: [String]
var width: CGFloat
var height: CGFloat
var start: Int
var columns: Int
var cardWidth: CGFloat {
return height * CardView.kPhysicalAspect
}
var cardSpacing: CGFloat {
return (width - cardWidth) / CGFloat(columns - 1)
}
var body: some View {
HStack(spacing: 0) {
// Visit all cards, but only add the ones that are within the range defined by start/columns
ForEach(0 ..< cards.count) { index in
if index < columns && start + index < cards.count {
HStack(spacing: 0) {
CardView(faceCode: cards[start + index])
.frame(width: cardWidth, height: height)
}
.frame(width: cardSpacing, alignment: .leading)
}
}
}
}
}
struct ContentView: View {
@State var cards: [String]
@State var fanned: Bool = true
// Generates the number of rows/columns that meets our rectangle-packing criteria
func pack(area: CGSize, count: Int) -> (rows: Int, cols: Int) {
let areaAspect = area.width / area.height
let exposureAspect = 1 - CardView.kVerticalCornerExposureRatio
let aspect = areaAspect / CardView.kPhysicalAspect * exposureAspect
var rows = Int(ceil(sqrt(Double(count)) / aspect))
let cols = count / rows + (count % rows > 0 ? 1 : 0)
while cols * (rows - 1) >= count { rows -= 1 }
return (rows, cols)
}
// Calculate the height of a card such that a series of rows overlap without covering the corner pips
func cardHeight(frameHeight: CGFloat, rows: Int) -> CGFloat {
let partials = CGFloat(rows - 1) * CardView.kVerticalCornerExposureRatio + 1
return frameHeight / partials
}
var body: some View {
VStack {
GeometryReader { geometry in
let w = geometry.size.width
let h = geometry.size.height
if w > 0 && h > 0 { // using `geometry.size != .zero` crashes the preview :(
let (rows, cols) = pack(area: geometry.size, count: cards.count)
let cardHeight = cardHeight(frameHeight: h, rows: rows)
let rowSpacing = cardHeight * CardView.kVerticalCornerExposureRatio
VStack(spacing: 0) {
// Visit all cards as if the layout is one row per card and simply skip the rows
// we're not interested in. If I make this `0 ..< rows` - it doesn't work at all
ForEach(0 ..< cards.count) { row in
if row < rows {
RowView(cards: cards, width: w, height: cardHeight, start: row * cols, columns: cols)
.frame(width: w, height: rowSpacing, alignment: .topLeading)
}
}
}
.frame(width: w, height: 100, alignment: .topLeading)
}
}
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView(cards: ["01", "02", "03", "04", "05", "06", "07", "08", "09",
"10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
"20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
"30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
"40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
"50", "51", "52"])
.background(Color.white)
.preferredColorScheme(.light)
}
}
I think you're on the right track that you need to use an Identifiable to prevent the system from making assumptions about what can be recycled in the ForEach. To that end, I've created a Card:
struct Card : Identifiable {
let id = UUID()
var title : String
}
Within the RowView, this is trivial to use:
struct RowView: View {
var cards: [Card]
var width: CGFloat
var height: CGFloat
var columns: Int
var cardWidth: CGFloat {
return height * CardView.kPhysicalAspect
}
var cardSpacing: CGFloat {
return (width - cardWidth) / CGFloat(columns - 1)
}
var body: some View {
HStack(spacing: 0) {
// Lay out this row's cards; cardSpacing is derived from the column count
ForEach(cards) { card in
HStack(spacing: 0) {
CardView(faceCode: card.title)
.frame(width: cardWidth, height: height)
}
.frame(width: cardSpacing, alignment: .leading)
}
}
}
}
In the ContentView, things get a little more complicated because of the dynamic rows:
struct ContentView: View {
@State var cards: [Card] = (1..<53).map { Card(title: "\($0)") }
@State var fanned: Bool = true
// Generates the number of rows/columns that meets our rectangle-packing criteria
func pack(area: CGSize, count: Int) -> (rows: Int, cols: Int) {
let areaAspect = area.width / area.height
let exposureAspect = 1 - CardView.kVerticalCornerExposureRatio
let aspect = areaAspect / CardView.kPhysicalAspect * exposureAspect
var rows = Int(ceil(sqrt(Double(count)) / aspect))
let cols = count / rows + (count % rows > 0 ? 1 : 0)
while cols * (rows - 1) >= count { rows -= 1 }
return (rows, cols)
}
// Calculate the height of a card such that a series of rows overlap without covering the corner pips
func cardHeight(frameHeight: CGFloat, rows: Int) -> CGFloat {
let partials = CGFloat(rows - 1) * CardView.kVerticalCornerExposureRatio + 1
return frameHeight / partials
}
var body: some View {
VStack {
GeometryReader { geometry in
let w = geometry.size.width
let h = geometry.size.height
if w > 0 && h > 0 { // using `geometry.size != .zero` crashes the preview :(
let (rows, cols) = pack(area: geometry.size, count: cards.count)
let cardHeight = cardHeight(frameHeight: h, rows: rows)
let rowSpacing = cardHeight * CardView.kVerticalCornerExposureRatio
VStack(spacing: 0) {
ForEach(Array(cards.enumerated()), id: \.1.id) { (index, card) in
let row = index / cols
if index % cols == 0 {
let rangeMin = min(cards.count, row * cols)
let rangeMax = min(cards.count, rangeMin + cols)
RowView(cards: Array(cards[rangeMin..<rangeMax]), width: w, height: cardHeight, columns: cols)
.frame(width: w, height: rowSpacing, alignment: .topLeading)
}
}
}
.frame(width: w, height: 100, alignment: .topLeading)
}
}
}
}
}
This loops through all of the cards and uses the unique IDs. Then, there's some logic to use the index to determine what row the loop is on and if it is the beginning of the loop (and thus should render the row). Finally, it sends just a subset of the cards to the RowView.
Note: you can look at Swift Algorithms for a more efficient method than enumerated. See indexed: https://github.com/apple/swift-algorithms/blob/main/Guides/Indexed.md
@jnpdx has provided a valid answer that is direct in its approach, which helps to understand the problem without adding additional complexity.
I have also stumbled across an alternative approach that requires more drastic changes to the structure of the code, but is more performant while also leading to more production-ready code.
To begin with, I created a CardData class that conforms to the ObservableObject protocol. This includes the code to pack a set of cards into rows/columns based on a given CGSize.
class CardData: ObservableObject {
var cards = [[String]]()
var hasData: Bool {
return cards.count > 0 && cards[0].count > 0
}
func layout(cards: [String], size: CGSize) -> CardData {
// ...
// Populate `cards` with packed rows/columns
// ...
return self
}
}
This would only work if the layout code could know the frame size for which it was packing. To that end, I used .onChange(of:perform:) to track changes to the geometry itself:
.onChange(of: geometry.size, perform: { size in
cards.layout(cards: cardStrings, size: size)
})
This greatly simplifies the ContentView:
var body: some View {
VStack {
GeometryReader { geometry in
let cardHeight = cardHeight(frameHeight: geometry.size.height, rows: cards.rows)
let rowSpacing = cardHeight * CardView.kVerticalCornerExposureRatio
VStack(spacing: 0) {
ForEach(cards.cards, id: \.self) { row in
RowView(cards: row, width: geometry.size.width, height: cardHeight)
.frame(width: geometry.size.width, height: rowSpacing, alignment: .topLeading)
}
}
.frame(width: geometry.size.width, height: 100, alignment: .topLeading)
.onChange(of: geometry.size, perform: { size in
_ = cards.layout(cards: CardData.faceCodes, size: size)
})
}
}
}
In addition, it also simplifies the RowView:
var body: some View {
HStack(spacing: 0) {
ForEach(cards, id: \.self) { card in
HStack(spacing: 0) {
CardView(faceCode: card)
.frame(width: cardWidth, height: height)
}
.frame(width: cardSpacing, alignment: .leading)
}
}
}
Further improvements can be had by storing rows/columns of CardViews inside CardData rather than the card title strings. This will eliminate the need to recreate a full set of (in my case, complex) CardViews in the View code.
The final end result now looks like this:

How do I scale a group in Phaser 3

In Phaser 2 we scale a group simply by setting the scale property, as explained in the docs:
https://phaser.io/examples/v2/groups/group-scale
But there is no equivalent in Phaser v3.
The possible url https://phaser.io/examples/v3/groups/group-scale points to nothing. And if I do:
this.enemies = this.add.group();
this.enemies.scale.set(2, 2);
It throws:
Phaser v3.19.0 (WebGL | Web Audio) https://phaser.io
indexph.js:22 Uncaught TypeError: Cannot read property 'set' of undefined
What is the appropriate form to scale a group of sprites in Phaser 3?
The code below should work, I think, but it doesn't... it doesn't scale objects that are created from the group:
preload() {
this.load.atlas("sprites", "assets/spritesheet.png", "assets/spritesheet.json")
}
create() {
this.enemies = this.add.group({
key: 'sprites' ,
setScale: { x: 0.1, y: 0.1 }
});
this.enemies.create(60, 60, 'sprites', 'hamburguer.png');
In Phaser 3, you can scale a group by modifying the GroupConfig object passed in when you declare your group.
GroupConfig API Reference. You can also see a live demo here.
In your case, to scale this group you should simply create it like:
this.enemies = this.add.group({
setScale: { x: 2, y: 2}
});
Alternatively, you could iterate through the group once it's created and scale each child object independently.
this.enemies = this.add.group();
this.enemies.children.iterate((child) => {
child.setScale(2, 2);
});
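If enemies keep getting added to the group later (for example via this.enemies.create(...)), another option is a createCallback so each new child is scaled as it joins the group. A minimal sketch, assuming the standard GroupConfig createCallback hook and the sprite atlas from the question:
this.enemies = this.add.group({
// runs for every child added to or created by the group
createCallback: (child) => {
child.setScale(2, 2);
}
});
this.enemies.create(60, 60, 'sprites', 'hamburguer.png'); // scaled to 2x as it is created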
var config = {
type: Phaser.AUTO,
parent: 'phaser-example',
width: 800,
height: 600, loader: {
baseURL: 'https://raw.githubusercontent.com/nazimboudeffa/assets/master/',
crossOrigin: 'anonymous'
},
scene: {
preload: preload,
create: create
},
physics: {
default: 'arcade'
}
};
var game = new Phaser.Game(config);
function preload ()
{
this.load.image('alien1', 'sprites/phaser-alien.png');
this.load.image('alien2', 'sprites/alien2.png');
}
function create ()
{
this.enemies1 = this.add.group();
this.enemies2 = this.add.group();
for (let i = 0; i < 64; i++)
{
let x = Phaser.Math.Between(0, 400);
let y = Phaser.Math.Between(0, 600);
this.enemy1 = this.add.image(x, y, 'alien1');
this.enemies1.add(this.enemy1);
}
for (let i = 0; i < 64; i++)
{
let x = Phaser.Math.Between(400, 800);
let y = Phaser.Math.Between(0, 600);
this.enemy2 = this.add.image(x, y, 'alien2');
this.enemies2.add(this.enemy2);
}
console.log(this.enemies1.getLength())
//console.log(this.enemies.getChildren())
console.log(this.enemies1.getChildren()[2])
for (let i = 0; i < 64; i++)
{
this.enemies1.getChildren()[i].setScale(2);
}
}
<script src="//cdn.jsdelivr.net/npm/phaser#3.19.0/dist/phaser.js"></script>

Area map into a bootstrap tab not working

I have Bootstrap tabs that are working well.
I have an area map that works well when it is not inserted into a tab.
I use the Responsive Image Maps jQuery Plugin from Matt Stow, which also works fine.
The symptom:
I put the area map into one of the tabs, one that is not active by default.
Then I click on the tab to show it. The img is shown correctly.
But the area map does not work. I can't see the clickable rect.
But if I manually resize my browser window, then the area map works.
The page: https://boutique.bilp.fr/71-les-pieds-de-poteaux.html
Select the tab "Guide de choix"; the white rectangles should be clickable. They are not until I manually resize the window.
The cause:
The culprit is the Responsive Image Maps jQuery Plugin. In its code, it calls the jQuery .width() method to obtain the width of the img where the map should work. And because the parent (tab) is hidden, the returned width is wrong. And it uses that width to resize the map... with bad values. The map ends up so small that it seems not to work.
Thanks for your help.
One solution is to modify the Responsive Image Maps jQuery Plugin by making ancestors visible before calling width().
Original code:
/*
* rwdImageMaps jQuery plugin v1.6
*
* Allows image maps to be used in a responsive design by recalculating the area coordinates to match the actual image size on load and window.resize
*
* Copyright (c) 2016 Matt Stow
* https://github.com/stowball/jQuery-rwdImageMaps
* http://mattstow.com
* Licensed under the MIT license
*/
;(function($) {
$.fn.rwdImageMaps = function() {
var $img = this;
var rwdImageMap = function() {
$img.each(function() {
if (typeof($(this).attr('usemap')) == 'undefined')
return;
var that = this,
$that = $(that);
// Since WebKit doesn't know the height until after the image has loaded, perform everything in an onload copy
$('<img />').on('load', function() {
var attrW = 'width',
attrH = 'height',
w = $that.attr(attrW),
h = $that.attr(attrH);
if (!w || !h) {
var temp = new Image();
temp.src = $that.attr('src');
if (!w)
w = temp.width;
if (!h)
h = temp.height;
}
var wPercent = $that.width()/100,
hPercent = $that.height()/100,
map = $that.attr('usemap').replace('#', ''),
c = 'coords';
$('map[name="' + map + '"]').find('area').each(function() {
var $this = $(this);
if (!$this.data(c))
$this.data(c, $this.attr(c));
var coords = $this.data(c).split(','),
coordsPercent = new Array(coords.length);
for (var i = 0; i < coordsPercent.length; ++i) {
if (i % 2 === 0)
coordsPercent[i] = parseInt(((coords[i]/w)*100)*wPercent);
else
coordsPercent[i] = parseInt(((coords[i]/h)*100)*hPercent);
}
$this.attr(c, coordsPercent.toString());
});
}).attr('src', $that.attr('src'));
});
};
$(window).resize(rwdImageMap).trigger('resize');
return this;
};
})(jQuery);
The modified code:
/*
* rwdImageMaps jQuery plugin v1.6
*
* Allows image maps to be used in a responsive design by recalculating the area coordinates to match the actual image size on load and window.resize
*
* Copyright (c) 2016 Matt Stow
* https://github.com/stowball/jQuery-rwdImageMaps
* http://mattstow.com
* Licensed under the MIT license
*/
;(function($) {
$.fn.rwdImageMaps = function() {
var $img = this;
var rwdImageMap = function() {
$img.each(function() {
if (typeof($(this).attr('usemap')) == 'undefined')
return;
var that = this,
$that = $(that);
// Since WebKit doesn't know the height until after the image has loaded, perform everything in an onload copy
$('<img />').on('load', function() {
// Modif BC : make ancestors visible so .width() can return the right value
//************************************************
var hidden_ancestors = [];
$that.parents().each(function() {
if ($(this).css('display') == 'none')
{
$(this).show();
hidden_ancestors.push($(this));
};
});
// END Modif BC
var attrW = 'width',
attrH = 'height',
w = $that.attr(attrW),
h = $that.attr(attrH);
if (!w || !h) {
var temp = new Image();
temp.src = $that.attr('src');
if (!w)
w = temp.width;
if (!h)
h = temp.height;
}
var wPercent = $that.width()/100,
hPercent = $that.height()/100,
map = $that.attr('usemap').replace('#', ''),
c = 'coords';
$('map[name="' + map + '"]').find('area').each(function() {
var $this = $(this);
if (!$this.data(c))
$this.data(c, $this.attr(c));
var coords = $this.data(c).split(','),
coordsPercent = new Array(coords.length);
for (var i = 0; i < coordsPercent.length; ++i) {
if (i % 2 === 0)
coordsPercent[i] = parseInt(((coords[i]/w)*100)*wPercent);
else
coordsPercent[i] = parseInt(((coords[i]/h)*100)*hPercent);
}
$this.attr(c, coordsPercent.toString());
});
// Modif BC : Restore invisibility on ancestors
//*********************************************
jQuery.each(hidden_ancestors, function(index, value)
{
$(value).css({display: ''});
});
// END Modif BC
}).attr('src', $that.attr('src'));
});
};
$(window).resize(rwdImageMap).trigger('resize');
return this;
};
})(jQuery);
I will propose this improvement to Matt Stow, the author.
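An alternative that avoids patching the plugin: since rwdImageMaps recalculates the coordinates on window resize, you can trigger a resize once the tab is actually visible. A rough sketch, assuming Bootstrap's shown.bs.tab event and tabs toggled via data-toggle="tab":
// when a tab pane becomes visible, force rwdImageMaps to recompute the area coords
$('a[data-toggle="tab"]').on('shown.bs.tab', function () {
$(window).trigger('resize');
});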

Keep objects looking at camera

Guys, I know this question has been asked several times, several different ways, but I just can't get it to work. Basically I have 2D clouds, but I want the camera to rotate around an object floating above the clouds. The problem is, when I'm not looking at the face of the clouds you can tell that they are 2D. So I want the clouds to "look" at the camera wherever it is. I believe my problem stems from how the cloud geometry is merged from the planes, but here, take a look. I put a lookAt call in my animate function. I hope you can point me in the right direction at least.
Three.js rev. 70...
container.appendChild(renderer.domElement);
camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 0, 100);
scene.add(camera);
controls = new THREE.OrbitControls( camera );
controls.target.copy( new THREE.Vector3( 0, 0,475) );
controls.minDistance = 50;
controls.maxDistance = 200;
controls.autoRotate = true;
controls.autoRotateSpeed = .2; // 30 seconds per round when fps is 60
controls.minPolarAngle = Math.PI/4; // radians
controls.maxPolarAngle = Math.PI/2; // radians
controls.enableDamping = true;
controls.dampingFactor = 0.25;
clock = new THREE.Clock();
cloudGeometry = new THREE.Geometry();
var texture = THREE.ImageUtils.loadTexture('img/cloud10.png', null, animate);
texture.magFilter = THREE.LinearMipMapLinearFilter;
texture.minFilter = THREE.LinearMipMapLinearFilter;
var fog = new THREE.Fog(0x4584b4, -100, 3000);
cloudMaterial = new THREE.ShaderMaterial({
uniforms: {
"map": {
type: "t",
value: texture
},
"fogColor": {
type: "c",
value: fog.color
},
"fogNear": {
type: "f",
value: fog.near
},
"fogFar": {
type: "f",
value: fog.far
},
},
vertexShader: document.getElementById('vs').textContent,
fragmentShader: document.getElementById('fs').textContent,
depthWrite: false,
depthTest: false,
transparent: true
});
var plane = new THREE.Mesh(new THREE.PlaneGeometry(64, 64));
for (var i = 0; i < 8000; i++) {
plane.position.x = Math.random() * 1000 - 500;
plane.position.y = -Math.random() * Math.random() * 200 - 15;
plane.position.z = i;
plane.rotation.z = Math.random() * Math.PI;
plane.scale.x = plane.scale.y = Math.random() * Math.random() * 1.5 + 0.5;
plane.updateMatrix();
cloudGeometry.merge(plane.geometry, plane.matrix);
}
cloud = new THREE.Mesh(cloudGeometry, cloudMaterial);
scene.add(cloud);
cloud = new THREE.Mesh(cloudGeometry, cloudMaterial);
cloud.position.z = -8000;
scene.add(cloud);
var radius = 100;
var xSegments = 50;
var ySegments = 50;
var geo = new THREE.SphereGeometry(radius, xSegments, ySegments);
var mat = new THREE.ShaderMaterial({
uniforms: {
lightPosition: {
type: 'v3',
value: light.position
},
textureMap: {
type: 't',
value: THREE.ImageUtils.loadTexture("img/maps/moon.jpg")
},
normalMap: {
type: 't',
value: THREE.ImageUtils.loadTexture("img/maps/normal.jpg")
},
uvScale: {
type: 'v2',
value: new THREE.Vector2(1.0, 1.0)
}
},
vertexShader: document.getElementById('vertexShader').textContent,
fragmentShader: document.getElementById('fragmentShader').textContent
});
mesh = new THREE.Mesh(geo, mat);
mesh.geometry.computeTangents();
mesh.position.set(0, 50, 0);
mesh.rotation.set(0, 180, 0);
scene.add(mesh);
}
function onWindowResize() {
renderer.setSize(window.innerWidth, window.innerHeight);
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
}
function animate() {
requestAnimationFrame(animate);
light.orbit(mesh.position, clock.getElapsedTime());
cloud.lookAt( camera );
controls.update(camera);
renderer.render(scene, camera);
}
animate();
window.addEventListener('resize', onWindowResize, false);
Just a first guess:
the lookAt function needs a Vector3 as its parameter. Try using camera.position in the animate function:
cloud.lookAt( camera.position );
First of all, to build 2D objects in a scene that always face towards the camera, you should use a Sprite object, so you don't have to do anything to get this effect (and you get better performance :)).
Definition from THREE.org: Sprite - a sprite is a plane in a 3D scene which always faces towards the camera.
var map = THREE.ImageUtils.loadTexture( "sprite.png" );
var material = new THREE.SpriteMaterial( { map: map, color: 0xffffff, fog: true } );
var sprite = new THREE.Sprite( material );
scene.add( sprite );
Please check this example: http://threejs.org/examples/#webgl_points_sprites
I would absolutely agree, I would use Sprite, or even Points, but then, if you assign a texture to it, it will render it square-sized. My sprites are animated, and the frames cannot be packed in square tiles, because that would take a lot of space. I might make a mesh and use this lookAt function.
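For that mesh-based route, a rough sketch of per-mesh billboarding in the render loop (assuming each animated cloud is its own mesh, held in a hypothetical clouds array, rather than one merged geometry):
function animate() {
requestAnimationFrame(animate);
// orient every cloud mesh toward the camera each frame
clouds.forEach((cloud) => cloud.lookAt(camera.position));
// or: cloud.quaternion.copy(camera.quaternion) to copy the camera's orientation exactly
controls.update();
renderer.render(scene, camera);
}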