html canvas - multiple svg images which all have different fillStyle colors - vue.js

For a digital artwork I'm generating a canvas element in Vue which draws from an array of multiple images.
The images can be split into two categories:
SVG (comes with a fill-color)
PNG (just needs to be drawn as a regular image)
I came up with this:
const depict = (options) => {
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  const myOptions = Object.assign({}, options);
  if (myOptions.ext == "svg") {
    return loadImage(myOptions.uri).then((img) => {
      ctx.drawImage(img, 0, 0, 100, 100);
      ctx.globalCompositeOperation = "source-in";
      ctx.fillStyle = myOptions.clr;
      ctx.fillRect(0, 0, 100, 100);
      ctx.globalCompositeOperation = "source-over";
    });
  } else {
    return loadImage(myOptions.uri).then((img) => {
      ctx.fillStyle = myOptions.clr;
      ctx.drawImage(img, 0, 0, 100, 100);
    });
  }
};
this.inputs.forEach(depict);
for context:
myOptions.clr = the color
myOptions.uri = the url of the image
myOptions.ext = the extension of the image
While all images are drawn correctly, I can't figure out why the last fillStyle overlays the whole image. I just want each SVG to get the fillStyle that is attached to it.
I tried multiple globalCompositeOperation values in different orders. I also tried drawing the SVG between ctx.save and ctx.restore. No success… I might be missing some logic here.

So! I figured it out myself in the meantime :)
I created an async loop with a promise. Inside it I create a temporary canvas per image, which I then draw onto one parent canvas. That way the source-in fill only affects the SVG on its own temporary canvas instead of everything already drawn on the shared canvas. I took inspiration from this solution: https://stackoverflow.com/a/6687218/15289586
Here is the final code:
// create the parent canvas
let parentCanv = document.createElement("canvas");
const getContext = () => parentCanv.getContext("2d");
const parentCtx = getContext();
parentCanv.classList.add("grid");

// get the wrapper from the DOM
let wrapper = document.getElementById("wrapper");

// this function loops through the array
async function drawShapes(files) {
  for (const file of files) {
    await depict(file);
  }
  // once the loop is done, append the parent canvas to the wrapper
  wrapper.appendChild(parentCanv);
}

// async image loading worked best
const loadImage = (url) => {
  return new Promise((resolve, reject) => {
    const img = new Image();
    img.onload = () => resolve(img);
    img.onerror = () => reject(new Error(`load ${url} fail`));
    img.src = url;
  });
};

// depict the file
const depict = (options) => {
  // make a promise
  return new Promise((accept, reject) => {
    const myOptions = Object.assign({}, options);
    var childCanv = document.createElement("canvas");
    const getContext = () => childCanv.getContext("2d");
    const childCtx = getContext();
    if (myOptions.ext == "svg") {
      loadImage(myOptions.uri).then((img) => {
        childCtx.drawImage(img, 0, 0, 100, parentCanv.height);
        childCtx.globalCompositeOperation = "source-in";
        childCtx.fillStyle = myOptions.clr;
        childCtx.fillRect(0, 0, parentCanv.width, parentCanv.height);
        parentCtx.drawImage(childCanv, 0, 0);
        accept();
      });
    } else {
      loadImage(myOptions.uri).then((img) => {
        // ctx.fillStyle = myOptions.clr;
        childCtx.drawImage(img, 0, 0, 100, parentCanv.height);
        parentCtx.drawImage(childCanv, 0, 0);
        accept();
      });
    }
  });
};
drawShapes(this.inputs);

Related

Three.js load a gltf model and set color for it but when zooming out it is all black

Hello everyone, I have met a strange problem: I loaded a glTF model in three.js and set colors for it. When zooming in it has colors, but when zooming out it is all black. When I directly set a color on its material, it works well.
Thank you.
Here are the sample screenshots and code.
loadGlbModel() {
  const loader = new GLTFLoader();
  loader.load(
    `/three/eiffel-tower.gltf`,
    (gltf) => {
      const geometry = gltf.scene.children[0].geometry;
      const positions = geometry.attributes.position;
      const count = positions.count;
      geometry.setAttribute(
        "color",
        new THREE.BufferAttribute(new Float32Array(count * 3), 3)
      );
      const color = new THREE.Color();
      const colors = geometry.attributes.color;
      const radius = 200;
      debugger;
      for (let i = 0; i < count; i++) {
        color.setHSL(positions.getY(i) / radius / 2, 1.0, 0.5);
        colors.setXYZ(i, 1, 0, 0);
      }
      const material = new THREE.MeshPhongMaterial({
        color: 0xffffff,
        flatShading: true,
        vertexColors: true,
        shininess: 0,
      });
      const wireframeMaterial = new THREE.MeshBasicMaterial({
        color: 0x000000,
        wireframe: true,
        transparent: true,
      });
      let mesh = new THREE.Mesh(geometry, material);
      let wireframe = new THREE.Mesh(geometry, wireframeMaterial);
      mesh.add(wireframe);
      mesh.scale.set(0.1, 0.1, 0.1);
      const redColor = new THREE.Color(1, 0, 0);
      console.log(mesh);
      // mesh.children[0].material.color = redColor;
      const light = new THREE.DirectionalLight(0xffffff);
      light.position.set(0, 0, 1);
      this.scene.add(light);
      this.scene.add(mesh);
    },
    (xhr) => {
      console.log((xhr.loaded / xhr.total) * 100 + "% loaded");
    },
    (error) => {
      console.error(error);
    }
  );
},
You are rendering the wireframe version too, which consists of lines in screen-space. As you zoom out, these lines maintain the same width in pixels, thus covering everything else.
Do you wish to render the wireframe too? If not, don't. If you do, then consider hiding it as the user zooms out.
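A minimal sketch of that last idea, assuming an OrbitControls-style setup (the controls, camera, and wireframe names and the distance threshold below are assumptions for illustration, not part of the original post):
// Hypothetical sketch: hide the wireframe overlay once the camera is far away.
const WIREFRAME_MAX_DISTANCE = 50; // tune to your scene's scale

controls.addEventListener("change", () => {
  const distance = camera.position.distanceTo(controls.target);
  // wireframe is the THREE.Mesh with the wireframe material from the question
  wireframe.visible = distance < WIREFRAME_MAX_DISTANCE;
});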

Jerky Animations After Awhile ThreeJs

At first, my animation seems to work fine. However, after a few seconds the animation becomes very jerky and laggy, and I'm not sure why.
At first I thought it was due to the animation button I had which allows the user to start and stop the animation at will. However, even after I commented out the button, the animation continued to be laggy.
let camera, scene, renderer;
const loader = new GLTFLoader();
let mixer = null;
let controls;
const clock = new THREE.Clock();
let previousTime = 0;

// start and stop button
let runAnim = false;
let isPlay = true;

// animation
function animation() {
  if (!isPlay) return;
  const elapsedTime = clock.getElapsedTime();
  const deltaTime = elapsedTime - previousTime;
  previousTime = elapsedTime;
  // Update mixer
  if (mixer !== null) {
    mixer.update(deltaTime);
  }
  // Update controls
  controls.update();
  window.requestAnimationFrame(animation);
  render();
}

function render() {
  renderer.render(scene, camera);
}

module.exports = function getImage() {
  const mountRef = useRef(null);
  useEffect(() => {
    // Model
    loader.load(`/gltf/1.gltf`);
    mixer = new THREE.AnimationMixer(gltf.scene);
    const action = mixer.clipAction(gltf.animations[0]);
    action.play();
    animation();
    // Camera
    camera = new THREE.PerspectiveCamera(
      70,
      window.innerWidth / window.innerHeight,
      0.1,
      100
    );
    camera.position.set(4, 0, 5);
    scene = new THREE.Scene();
    // Controls
    controls = new OrbitControls(camera, renderer.domElement);
    controls.update();
    controls.enableDamping = true;
    // Animation button
    const animateButton = document.getElementById('animate-button');
    const stopAnimation = (e) => {
      if (runAnim) {
        runAnim = false;
        isPlay = true;
        animation();
        console.log('animation starts');
      } else {
        runAnim = true;
        isPlay = false;
        console.log('animation stops');
      }
    };
    animateButton.addEventListener('click', stopAnimation);
    return () => mountRef.removeChild(renderer.domElement);
  }, []);
  return (
    <div>
      <div ref={mountRef}>
        <AnimationButton />
      </div>
    </div>
  );
};

Getting wrong landmarks from mediapipe handpose

I am creating a handpose recognition app using MediaPipe Handpose. But the landmark values that I am getting from the estimateHands function keep on increasing, and after a certain point I am receiving an array of NaN as output. The image that is received in the function comes from the tensor camera. Please do help. Thank you.
// run for every image in a video
let frame = 0;
const computeRecognitionEveryNFrames = 1;

const handleCameraStream = (images) => {
  let outer = [];
  let points = [];
  const loop = async () => {
    if (net) {
      if (frame % computeRecognitionEveryNFrames === 0) {
        const nextImageTensor = images.next().value;
        if (nextImageTensor) {
          if (outer.length > 90) outer = [];
          if (points.length > 11340) points = [];
          const objects = await net.estimateHands(nextImageTensor);
          console.log(objects);
          objects.length > 0
            ? objects.map((object) => {
                object.landmarks.map((arr) => {
                  points.push(...arr);
                });
              })
            : null;
          // tf.dispose([nextImageTensor]);
          nextImageTensor.dispose();
          if (points.length >= 126) outer.push(points.slice(-126));
          // if (outer.length >= 30) {
          //   (async () => {
          //     const model = await tf.loadLayersModel(
          //       bundleResourceIO(modelJSON, modelWeights)
          //     );
          //     // console.log(outer);
          //     const ypreds = Array.from(
          //       model
          //         .predict(tf.tensor(outer.slice(-30)).expandDims(0))
          //         .dataSync()
          //     );
          //     // console.log(ypreds.indexOf(Math.max(ypreds)));
          //     console.log(ypreds);
          //   })();
          // }
          console.log(outer.length);
        }
      }
      frame += 1;
      frame = frame % computeRecognitionEveryNFrames;
    }
    requestAnimationFrame(loop);
  };
  loop();
};
=========================================================
<TensorCamera
  style={{
    width: 256,
    height: height * 0.8,
    marginRight: "auto",
    marginLeft: "auto",
  }}
  resizeHeight={height * 0.8}
  resizeDepth={3}
  resizeWidth={256}
  type={Camera.Constants.Type.front}
  autorender={true}
  onReady={(images) => handleCameraStream(images)}
/>
</View>
Screenshot of the logged landmark values: https://i.stack.imgur.com/pR0JW.png

BackstopJS: Not able to scroll the page to end using scrollToSelector

I am using two files: one to keep all the URLs and other variables, and the other to keep the scenario config.
const projectId = "test"; //
let baseUrl = "someurl"; //
let scrollToSelector = "";
let removeSelector = "";

// Replace the values of the below array with the relative URLs of your website. E.g., "/about", "/contact", "/pricing", etc.
// Use just "/" to test the homepage of your website.
// Add as many relative URLs as you need.
const relativeUrls = [
  "/about",
  "/documentation",
  "/case-studies/",
  "/solutions/",
  "/blog/"
];

relativeUrls.map(relativeUrl => {
  if (relativeUrl === "/about") {
    scrollToSelector = "a.wp-block-button__link";
    removeSelector = ".is-style-image-banner";
    console.log(scrollToSelector);
  }
});

// Leave the below array as is if you want to test your website using the viewports listed below.
// The supported viewports are: phone (320px X 480px), tablet (1024px X 768px), and desktop (1280px X 1024px).
// No other viewports are supported.
// You can remove the viewports that you don't need, but at least one of them is required.
const viewports = [
  "phone",
  "tablet",
  "desktop",
];

module.exports = {
  baseUrl,
  projectId,
  relativeUrls,
  viewports,
  scrollToSelector,
  removeSelector
};
mainConfig.js
const THREE_SECONDS_IN_MS = 3000;
const scenarios = [];
const viewports = [];

basicConfig.relativeUrls.map(relativeUrl => {
  scenarios.push({
    label: relativeUrl,
    url: `${basicConfig.baseUrl}${relativeUrl}`,
    delay: THREE_SECONDS_IN_MS,
    requireSameDimensions: false,
    scrollToSelector: basicConfig.scrollToSelector,
    removeSelectors: [basicConfig.removeSelector]
    // onReadyScript: "onReadyScript.js",
    // readyEvent: "page_loaded"
  });
});

basicConfig.viewports.map(viewport => {
  if (viewport === "phone") {
    pushViewport(viewport, 320, 480);
  }
  if (viewport === "tablet") {
    pushViewport(viewport, 1024, 768);
  }
  if (viewport === "desktop") {
    pushViewport(viewport, 1280, 1024);
  }
});

function pushViewport(viewport, width, height) {
  viewports.push({
    name: viewport,
    width,
    height,
  });
}

module.exports = {
  id: basicConfig.projectId,
  viewports,
  scenarios,
  paths: {
    bitmaps_reference: "test/backstop_data/bitmaps_reference",
    bitmaps_test: "test/backstop_data/bitmaps_test",
    html_report: "test/backstop_data/html_report"
  },
  report: ["CI"],
  engine: "puppeteer",
  engineOptions: {
    args: ["--no-sandbox"]
  },
  asyncCaptureLimit: 5,
  asyncCompareLimit: 50,
};
scrollToSelector doesn't seem to be working. Is there any other way it should be declared and called?
Your scrollToSelector needs to be in square brackets [], like this: scrollToSelector: ['.a.wp-block-button__link']
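A minimal sketch of applying that suggestion in the mainConfig.js scenario loop above (whether BackstopJS actually expects an array here is the answer's claim; the rest reuses the names already defined in that file):
basicConfig.relativeUrls.map(relativeUrl => {
  scenarios.push({
    label: relativeUrl,
    url: `${basicConfig.baseUrl}${relativeUrl}`,
    delay: THREE_SECONDS_IN_MS,
    requireSameDimensions: false,
    // wrap the selector in an array, as suggested in the answer above
    scrollToSelector: [basicConfig.scrollToSelector],
    removeSelectors: [basicConfig.removeSelector]
  });
});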

Image ratio when drawing images in react-native

I have an app where I take a picture and draw it to a PDF file.
I have been trying to get the pictures in the right ratio. If I take all pictures horizontally, it is fine. If I take all pictures vertically, it is fine. The problem is, when I take both vertical and horizontal pictures, the last picture's ratio gets applied to all pictures.
I save the image ratio to AsyncStorage and read it in another component.
The question is: how can I keep the original ratios when I take pictures both ways?
Camera component -> takePicture function:
takePicture = async () => {
  if (this.camera) {
    const options = { quality: 0.5, base64: true, fixOrientation: true };
    const data = await this.camera.takePictureAsync(options);
    console.log(data.uri);
    Image.getSize(data.uri, (width, height) => { // Get image height and width
      let imageWidth = width;
      let imageHeight = height;
      let stringImageWidth = '' + imageWidth; // Convert to string for AsyncStorage saving
      let stringImageHeight = '' + imageHeight;
      const horizontalRatioCalc = () => { // return ratio for own variable
        return (imageWidth / imageHeight);
      };
      const verticalRatioCalc = () => {
        return (imageWidth / imageHeight);
      };
      horizontalImageRatio = horizontalRatioCalc();
      verticalImageRatio = verticalRatioCalc();
      stringHorizontalImageRatio = '' + horizontalImageRatio;
      stringVerticalImageRatio = '' + verticalImageRatio;
      console.log(`Size of the picture ${imageWidth}x${imageHeight}`);
      horizontalRatio = async () => {
        if (imageHeight > imageWidth) {
          verticalRatioCalc();
          try {
            AsyncStorage.setItem("imageVerticalRatio", stringVerticalImageRatio),
            AsyncStorage.setItem("asyncimageWidth", stringImageWidth),
            AsyncStorage.setItem("asyncimageHeight", stringImageHeight),
            console.log(`Vertical ratio saved! It's ${stringVerticalImageRatio}`),
            console.log(`Image Width saved! It's ${stringImageWidth}`),
            console.log(`Image height saved! It's ${stringImageHeight}`)
          } catch (e) {
            console.log(`AsyncStorage saving of image vertical ratio cannot be done.`)
          }
        }
        if (imageHeight < imageWidth) {
          horizontalRatioCalc();
          try {
            AsyncStorage.setItem("imageHorizontalRatio", stringHorizontalImageRatio),
            AsyncStorage.setItem("asyncimageWidth", stringImageWidth),
            AsyncStorage.setItem("asyncimageHeight", stringImageHeight),
            console.log(`Horizontal ratio saved! It's ${stringHorizontalImageRatio}`),
            console.log(`Image Width saved! It's ${stringImageWidth}`),
            console.log(`Image height saved! It's ${stringImageHeight}`)
          } catch (e) {
            console.log(`AsyncStorage saving of image horizontal ratio cannot be done.`)
          }
        }
      }
      horizontalRatio();
    }, (error) => {
      console.error(`Cannot get size of the image: ${error.message}`);
    });
    back(data.uri)
    // CameraRoll.saveToCameraRoll(data.uri).then((data) => {
    //   console.log(data)
    //   back(data);
    // }).catch((error) => {
    //   console.log(error)
    // })
  }
};
CreatePdf component -> PDF drawing function:
readData = async () => {
  try {
    kuvakorkeus = await AsyncStorage.getItem('asyncimageHeight')
    kuvaleveys = await AsyncStorage.getItem('asyncimageWidth')
    vertikaaliratio = await AsyncStorage.getItem('imageVerticalRatio')
    horisontaaliratio = await AsyncStorage.getItem('imageHorizontalRatio')
    if (kuvakorkeus !== null) {
      return kuvakorkeus;
      return kuvaleveys;
      return vertikaaliratio;
      return horisontaaliratio;
    }
  } catch (e) {
    alert('Failed to fetch the data from storage')
  }
}
readData();

kuvanpiirto = () => {
  pdfKuvaKorkeus = 100;
  if (kuvakorkeus > kuvaleveys) { // VERTICAL / PYSTYKUVA
    page.drawImage(arr[i].path.substring(7), 'jpg', {
      x: imgX,
      y: imgY,
      width: pdfKuvaKorkeus * vertikaaliratio,
      height: pdfKuvaKorkeus,
    })
  }
  if (kuvakorkeus < kuvaleveys) { // Horizontal / Vaakakuva
    page.drawImage(arr[i].path.substring(7), 'jpg', {
      x: imgX,
      y: imgY,
      width: pdfKuvaKorkeus * horisontaaliratio,
      height: pdfKuvaKorkeus,
    })
  }
}
kuvanpiirto();