Object is not moving properly with mouse movement on mobile devices - createjs

I am trying to move an object with the mouse using the pressmove event on the object. On PC and tablet it works well, but on mobile devices it does not, since those have different widths and heights.
You can view it at the following URL:
http://quirktools.com/screenfly/#u=http://saurabhysecit.byethost15.com/scratchGame_Canvas.html&w=320&h=568&a=37&s=1
The direct URL is http://saurabhysecit.byethost15.com/scratchGame_Canvas.html
It was generated from Animate CC.
The code is below.
JS Code:
(function (lib, img, cjs, ss, an) {
var p; // shortcut to reference prototypes
lib.ssMetadata = [];
function mc_symbol_clone() {
var clone = this._cloneProps(new this.constructor(this.mode, this.startPosition, this.loop));
clone.gotoAndStop(this.currentFrame);
clone.paused = this.paused;
clone.framerate = this.framerate;
return clone;
}
function getMCSymbolPrototype(symbol, nominalBounds, frameBounds) {
var prototype = cjs.extend(symbol, cjs.MovieClip);
prototype.clone = mc_symbol_clone;
prototype.nominalBounds = nominalBounds;
prototype.frameBounds = frameBounds;
return prototype;
}
(lib.coin = function(mode,startPosition,loop) {
this.initialize(mode,startPosition,loop,{});
// Layer 1
this.shape = new cjs.Shape();
this.shape.graphics.f().s("#AA8801").ss(1,2,1).p("AH5AAQAACrhhCBQgIAKgIAKQgQAUgTASQiKCLjAAKQgNAAgOAAQi5AAiKh2QgRgOgQgRQiUiTAAjTQAAjDCBiOIAAgBQAJgKAKgKQAUgUAVgSQB9hlCigIQAOgBAOAAQAaAAAZACQAHAAAHABQCoAUB8B9QATASAQAUQBtCEAECvQAAADAAACQAAAFAAADg");
this.shape.setTransform(-4.8,0.1);
this.shape_1 = new cjs.Shape();
this.shape_1.graphics.f("#D9AE01").s().p("AlbGiQgTgQgSgSQigieAAjiQAAjhCgifQAWgWAXgTQCRh2DCgBQAdABAbACQC+ARCLCMQAUATASAWQB6CTAADEQAADGh6CSQgSAVgUATQiUCWjQAKIgdAAQjHAAiUh+gAgqnnQijAIh8BmQgVARgUAUIgUAUIAAABQiBCOAADEQAADSCVCTQAQARARAOQCJB2C6AAIAbAAQDAgJCJiMQATgSAQgUIAQgUQBhiBAAiqIAAgJIAAgGQgEithtiFQgQgTgTgTQh7h9ipgTIAAgDQgYgCgaAAQgVAAgVACg");
this.shape_1.setTransform(-3.3,-1.7);
this.shape_2 = new cjs.Shape();
this.shape_2.graphics.f("#FFCC00").s().p("AlDGFQgQgOgQgQQiViUAAjSQAAjDCBiPIAAAAIAUgVQAUgUAUgRQB9hmCigIQAWgCAVAAQAZAAAZACIAAADIgNgCQgZgCgbAAIgcABIAcgBQAbAAAZACIANACQCoATB8B9QATATAQAUQBsCEAFCvIAAAFIAAAIQAACqhhCCIgQATQgQAUgTATQiKCLjAAJIgbABQi5AAiKh3g");
this.shape_2.setTransform(-4.8,0.1);
this.shape_3 = new cjs.Shape();
this.shape_3.graphics.f("#695400").s().p("Ah6I1IgBAAQjHgKiUiJQCUB+DIAAIAeAAQDOgKCUiWQAUgTASgWQB6iSAAjFQAAjEh6iTQgSgWgUgUQiKiLi+gRQgbgDgdAAQjCAAiSB3QCSiBDBgKIABAAIAfAAIAbAAIAAAAQDXAJCbCcIAaAbIACAEQCJCcAADUIAAACQgBDViICaIgCAEQgMANgOANQikCmjpAAIgfAAg");
this.shape_3.setTransform(9,-1.7);
this.timeline.addTween(cjs.Tween.get({}).to({state:[{t:this.shape_3},{t:this.shape_2},{t:this.shape_1},{t:this.shape}]}).wait(1));
}).prototype = getMCSymbolPrototype(lib.coin, new cjs.Rectangle(-57.8,-58.2,113.9,113.1), null);
// stage content:
(lib.scratchGame_Canvas = function(mode,startPosition,loop) {
if (loop == null) { loop = false; } this.initialize(mode,startPosition,loop,{});
// timeline functions:
this.frame_0 = function() {
var pressBool=false;
var mask_mc = new createjs.Shape();
var bg_mc = new createjs.Bitmap("images/YLogo1.jpg");
//var coin_mc = new lib.coin();
var coin_mc = new createjs.Shape(new createjs.Graphics().beginFill("#FFFFFF").drawCircle(0, 0, 50));
coin_mc.x = stage.canvas.width/2;
coin_mc.y = stage.canvas.width/2;
stage.addChild(bg_mc, coin_mc);
createjs.Touch.enable(stage, false, true);
if(sRatio<1){
coin_mc.scaleX = coin_mc.scaleY = sRatio;
}
coin_mc.addEventListener('pressmove', moveCoin.bind(this));
coin_mc.addEventListener('mouseup', releaseCoin.bind(this));
updateCacheImage(false);
function moveCoin(e){
e.currentTarget.x = e.stageX;
e.currentTarget.y = e.stageY;
stage.update();
createMask(e);
}
function createMask(e) {
if(!pressBool)return;
var xLoc = stage.mouseX-20;
var yLoc = stage.mouseY-30;
mask_mc.graphics.beginFill("FFFFFF").drawEllipse(xLoc, yLoc, 40, 60);
updateCacheImage(true);
}
function updateCacheImage(update){
var w = 361;
if (update) {
mask_mc.updateCache();
} else {
mask_mc.cache(0, 0, w, w);
}
maskFilter = new createjs.AlphaMaskFilter(mask_mc.cacheCanvas);
bg_mc.filters = [maskFilter];
if (update) {
bg_mc.updateCache(0, 0, w, w);
} else {
bg_mc.cache(0, 0, w, w);
}
}
function releaseCoin(e) {
//stage.canvas.style.cursor = "default";
pressBool = false;
updateCacheImage(true);
}
}
// actions tween:
this.timeline.addTween(cjs.Tween.get(this).call(this.frame_0).wait(1));
}).prototype = p = new cjs.MovieClip();
p.nominalBounds = new cjs.Rectangle(385.9,359.4,113.9,113);
// library properties:
lib.properties = {
width: 550,
height: 400,
fps: 24,
color: "#999999",
opacity: 1.00,
manifest: [],
preloads: []
};
})(lib = lib||{}, images = images||{}, createjs = createjs||{}, ss = ss||{}, AdobeAn = AdobeAn||{});
var lib, images, createjs, ss, AdobeAn;
HTML Code:
<!DOCTYPE html>
<!--
NOTES:
1. All tokens are represented by '$' sign in the template.
2. You can write your code only wherever mentioned.
3. All occurrences of existing tokens will be replaced by their appropriate values.
4. Blank lines will be removed automatically.
5. Remove unnecessary comments before creating your template.
-->
<html>
<head>
<meta charset="UTF-8">
<meta name="authoring-tool" content="Adobe_Animate_CC">
<meta name="viewport" content="width=device-width, user-scalable=no, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, minimal-ui" />
<meta name="msapplication-tap-highlight" content="no"/>
<title>scratchGame_Canvas</title>
<!-- write your code here -->
<script src="jquery-3.2.1.slim.min.js"></script>
<script src="createjs-2015.11.26.min.js"></script>
<script src="scratchGame_Canvas.js?1497868121984"></script>
<script>
var canvas, stage, exportRoot, anim_container, dom_overlay_container, fnStartAnimation;
var pRatio = window.devicePixelRatio || 1, xRatio, yRatio, sRatio=1;
function init() {
canvas = document.getElementById("canvas");
anim_container = document.getElementById("animation_container");
dom_overlay_container = document.getElementById("dom_overlay_container");
handleComplete();
}
function handleComplete() {
//This function is always called, irrespective of the content. You can use the variable "stage" after it is created in token create_stage.
exportRoot = new lib.scratchGame_Canvas();
stage = new createjs.Stage(canvas);
stage.addChild(exportRoot);
//Registers the "tick" event listener.
fnStartAnimation = function() {
createjs.Ticker.setFPS(lib.properties.fps);
createjs.Ticker.addEventListener("tick", stage);
}
//Code to support hidpi screens and responsive scaling.
function makeResponsive(isResp, respDim, isScale, scaleType) {
var lastW, lastH, lastS=1;
window.addEventListener('resize', resizeCanvas);
resizeCanvas();
function resizeCanvas() {
var w = lib.properties.width, h = lib.properties.height;
var iw = window.innerWidth, ih=window.innerHeight;
pRatio = window.devicePixelRatio || 1, xRatio=iw/w, yRatio=ih/h, sRatio=1;
if(isResp) {
if((respDim=='width'&&lastW==iw) || (respDim=='height'&&lastH==ih)) {
sRatio = lastS;
}
else if(!isScale) {
if(iw<w || ih<h)
sRatio = Math.min(xRatio, yRatio);
}
else if(scaleType==1) {
sRatio = Math.min(xRatio, yRatio);
}
else if(scaleType==2) {
sRatio = Math.max(xRatio, yRatio);
}
}
canvas.width = w*pRatio*sRatio;
canvas.height = h*pRatio*sRatio;
canvas.style.width = dom_overlay_container.style.width = anim_container.style.width = w*sRatio+'px';
canvas.style.height = anim_container.style.height = dom_overlay_container.style.height = h*sRatio+'px';
stage.scaleX = pRatio*sRatio;
stage.scaleY = pRatio*sRatio;
lastW = iw; lastH = ih; lastS = sRatio;
}
}
makeResponsive(true,'both',false,1);
fnStartAnimation();
}
</script>
<!-- write your code here -->
</head>
<body onload="init();" style="margin:0px;">
<div id="animation_container" style="background-color:rgba(153, 153, 153, 1.00); width:550px; height:400px">
<canvas id="canvas" width="550" height="400" style="position: absolute; display: block; background-color:rgba(153, 153, 153, 1.00);"></canvas>
<div id="dom_overlay_container" style="pointer-events:none; overflow:hidden; width:550px; height:400px; position: absolute; left: 0px; top: 0px; display: block;">
</div>
</div>
</body>
</html>
Hoping to get a response soon.
Thanks.

As I mentioned in the comments, you can transform the coordinates so that your x/y mouse position is not affected by the responsive canvas scaling. One alternative is to use the globalToLocal() method inside the function bound to the pressmove event (in your case, moveCoin()).
scratchGame_Canvas.js
Function moveCoin()
function moveCoin(e){
var point;
point = stage.globalToLocal(e.stageX, e.stageY);
e.currentTarget.x = point.x;
e.currentTarget.y = point.y;
stage.update();
createMask(e);
}
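Note that createMask() still reads stage.mouseX / stage.mouseY directly, so the scratch ellipse may drift for the same reason once the stage is scaled. Below is a minimal sketch of applying the same conversion there, assuming the ellipse should track the corrected point (this is an extension of the answer, not part of it):
function createMask(e) {
    if (!pressBool) return;
    // Convert the raw stage-space mouse position into the scaled coordinate space,
    // exactly as done in moveCoin() above.
    var point = stage.globalToLocal(stage.mouseX, stage.mouseY);
    var xLoc = point.x - 20;
    var yLoc = point.y - 30;
    mask_mc.graphics.beginFill("#FFFFFF").drawEllipse(xLoc, yLoc, 40, 60);
    updateCacheImage(true);
}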
See also: Drag and drop with easeljs in Animate CC 2017
& MovieClip class (EaselJS API)

Related

Is there any way to convert "data:image/svg+xml;base64" to "data:image/png;base64" with JavaScript? [duplicate]

I want to convert SVG into bitmap images (like JPEG, PNG, etc.) through JavaScript.
Here is how you can do it through JavaScript:
Use the canvg JavaScript library to render the SVG image using Canvas: https://github.com/gabelerner/canvg
Capture a data URI encoded as a JPG (or PNG) from the Canvas, according to these instructions: Capture HTML Canvas as gif/jpg/png/pdf?
jbeard4's solution worked beautifully.
I'm using Raphael SketchPad to create an SVG. Link to the files in step 1.
For a Save button (id of svg is "editor", id of canvas is "canvas"):
$("#editor_save").click(function() {
// the canvg call that takes the svg xml and converts it to a canvas
canvg('canvas', $("#editor").html());
// the canvas calls to output a png
var canvas = document.getElementById("canvas");
var img = canvas.toDataURL("image/png");
// do what you want with the base64, write to screen, post to server, etc...
});
This seems to work in most browsers:
function copyStylesInline(destinationNode, sourceNode) {
var containerElements = ["svg","g"];
for (var cd = 0; cd < destinationNode.childNodes.length; cd++) {
var child = destinationNode.childNodes[cd];
if (containerElements.indexOf(child.tagName) != -1) {
copyStylesInline(child, sourceNode.childNodes[cd]);
continue;
}
var style = sourceNode.childNodes[cd].currentStyle || window.getComputedStyle(sourceNode.childNodes[cd]);
if (style == "undefined" || style == null) continue;
for (var st = 0; st < style.length; st++){
child.style.setProperty(style[st], style.getPropertyValue(style[st]));
}
}
}
function triggerDownload (imgURI, fileName) {
var evt = new MouseEvent("click", {
view: window,
bubbles: false,
cancelable: true
});
var a = document.createElement("a");
a.setAttribute("download", fileName);
a.setAttribute("href", imgURI);
a.setAttribute("target", '_blank');
a.dispatchEvent(evt);
}
function downloadSvg(svg, fileName) {
var copy = svg.cloneNode(true);
copyStylesInline(copy, svg);
var canvas = document.createElement("canvas");
var bbox = svg.getBBox();
canvas.width = bbox.width;
canvas.height = bbox.height;
var ctx = canvas.getContext("2d");
ctx.clearRect(0, 0, bbox.width, bbox.height);
var data = (new XMLSerializer()).serializeToString(copy);
var DOMURL = window.URL || window.webkitURL || window;
var img = new Image();
var svgBlob = new Blob([data], {type: "image/svg+xml;charset=utf-8"});
var url = DOMURL.createObjectURL(svgBlob);
img.onload = function () {
ctx.drawImage(img, 0, 0);
DOMURL.revokeObjectURL(url);
if (typeof navigator !== "undefined" && navigator.msSaveOrOpenBlob)
{
var blob = canvas.msToBlob();
navigator.msSaveOrOpenBlob(blob, fileName);
}
else {
var imgURI = canvas
.toDataURL("image/png")
.replace("image/png", "image/octet-stream");
triggerDownload(imgURI, fileName);
}
// the canvas was never appended to the document, so there is nothing to remove here
};
img.src = url;
}
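It could then be called like this (a hypothetical usage, assuming an inline SVG is present on the page):
downloadSvg(document.querySelector('svg'), 'image.png');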
A solution that converts the SVG to a blob URL and the blob URL to a PNG image:
const svg=`<svg version="1.1" baseProfile="full" width="300" height="200"
xmlns="http://www.w3.org/2000/svg">
<rect width="100%" height="100%" fill="red" />
<circle cx="150" cy="100" r="80" fill="green" />
<text x="150" y="125" font-size="60" text-anchor="middle" fill="white">SVG</text></svg>`
svgToPng(svg,(imgData)=>{
const pngImage = document.createElement('img');
document.body.appendChild(pngImage);
pngImage.src=imgData;
});
function svgToPng(svg, callback) {
const url = getSvgUrl(svg);
svgUrlToPng(url, (imgData) => {
callback(imgData);
URL.revokeObjectURL(url);
});
}
function getSvgUrl(svg) {
return URL.createObjectURL(new Blob([svg], { type: 'image/svg+xml' }));
}
function svgUrlToPng(svgUrl, callback) {
const svgImage = document.createElement('img');
// imgPreview.style.position = 'absolute';
// imgPreview.style.top = '-9999px';
document.body.appendChild(svgImage);
svgImage.onload = function () {
const canvas = document.createElement('canvas');
canvas.width = svgImage.clientWidth;
canvas.height = svgImage.clientHeight;
const canvasCtx = canvas.getContext('2d');
canvasCtx.drawImage(svgImage, 0, 0);
const imgData = canvas.toDataURL('image/png');
callback(imgData);
// document.body.removeChild(imgPreview);
};
svgImage.src = svgUrl;
}
Change the svg selector to match your element:
function svg2img(){
var svg = document.querySelector('svg');
var xml = new XMLSerializer().serializeToString(svg);
var svg64 = btoa(xml); //for utf8: btoa(unescape(encodeURIComponent(xml)))
var b64start = 'data:image/svg+xml;base64,';
var image64 = b64start + svg64;
return image64;
};svg2img()
My use case was to load the SVG data from the network, and this ES6 class did the job.
class SvgToPngConverter {
constructor() {
this._init = this._init.bind(this);
this._cleanUp = this._cleanUp.bind(this);
this.convertFromInput = this.convertFromInput.bind(this);
}
_init() {
this.canvas = document.createElement("canvas");
this.imgPreview = document.createElement("img");
this.imgPreview.style = "position: absolute; top: -9999px";
document.body.appendChild(this.imgPreview);
this.canvasCtx = this.canvas.getContext("2d");
}
_cleanUp() {
document.body.removeChild(this.imgPreview);
}
convertFromInput(input, callback) {
this._init();
let _this = this;
this.imgPreview.onload = function() {
const img = new Image();
_this.canvas.width = _this.imgPreview.clientWidth;
_this.canvas.height = _this.imgPreview.clientHeight;
img.crossOrigin = "anonymous";
img.src = _this.imgPreview.src;
img.onload = function() {
_this.canvasCtx.drawImage(img, 0, 0);
let imgData = _this.canvas.toDataURL("image/png");
if(typeof callback == "function"){
callback(imgData)
}
_this._cleanUp();
};
};
this.imgPreview.src = input;
}
}
Here is how you use it
let input = "https://restcountries.eu/data/afg.svg"
new SvgToPngConverter().convertFromInput(input, function(imgData){
// You now have your png data in base64 (imgData).
// Do what ever you wish with it here.
});
If you want a vanilla JavaScript version, you could head over to the Babel website and transpile the code there.
Here is a function that works without libraries and returns a Promise:
/**
* converts a base64 encoded data url SVG image to a PNG image
* @param originalBase64 data url of svg image
* @param width target width in pixel of PNG image
* @return {Promise<String>} resolves to png data url of the image
*/
function base64SvgToBase64Png (originalBase64, width) {
return new Promise(resolve => {
let img = document.createElement('img');
img.onload = function () {
document.body.appendChild(img);
let canvas = document.createElement("canvas");
let ratio = (img.clientWidth / img.clientHeight) || 1;
document.body.removeChild(img);
canvas.width = width;
canvas.height = width / ratio;
let ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
try {
let data = canvas.toDataURL('image/png');
resolve(data);
} catch (e) {
resolve(null);
}
};
img.onerror = function() {
resolve(null);
};
img.src = originalBase64;
});
}
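A hypothetical usage (the variable names are illustrative, not from the original answer):
base64SvgToBase64Png(svgDataUrl, 300).then(function (pngDataUrl) {
    // pngDataUrl is null if the conversion failed
    if (pngDataUrl) {
        var img = new Image();
        img.src = pngDataUrl;
        document.body.appendChild(img);
    }
});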
On Firefox there is an issue with SVGs that have no width/height set.
See this working example, which includes a fix for the Firefox issue.
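That example's fix is not reproduced here, but a common workaround (an assumption on my part, not necessarily the same fix) is to copy the viewBox dimensions onto explicit width/height attributes before serializing, so the image has an intrinsic size when drawn onto the canvas:
function ensureSvgSize(svgEl) {
    // Firefox needs explicit pixel dimensions to rasterize the SVG onto a canvas.
    if (!svgEl.getAttribute('width') || !svgEl.getAttribute('height')) {
        var vb = svgEl.viewBox.baseVal;
        svgEl.setAttribute('width', vb.width || 300);
        svgEl.setAttribute('height', vb.height || 150);
    }
}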
This is an old question; in 2022 we have ES6 and don't need third-party libraries.
Here is a very basic way to convert svg images into other formats.
The trick is to load the svg element as an img element, then use a canvas element to convert the image into the desired format. So, four steps are needed:
Extract the svg as an xml data string.
Load the xml data string into an img element.
Convert the img element to a dataURL using a canvas element.
Load the converted dataURL into a new img element.
Step 1
Extracting an svg as an xml data string is simple; we don't need to convert it to a base64 string. We just serialize it as XML and then encode the string as a URI:
// Select the element:
const $svg = document.getElementById('svg-container').querySelector('svg')
// Serialize it as xml string:
const svgAsXML = (new XMLSerializer()).serializeToString($svg)
// Encode it as a data string:
const svgData = `data:image/svg+xml,${encodeURIComponent(svgAsXML)}`
Step 2
Loading the xml data string into an img element:
// This function returns a Promise whenever the $img is loaded
const loadImage = async url => {
const $img = document.createElement('img')
$img.src = url
return new Promise((resolve, reject) => {
$img.onload = () => resolve($img)
$img.onerror = reject
$img.src = url
})
}
Step 3
Converting the img element to a dataURL using a canvas element:
const $canvas = document.createElement('canvas')
$canvas.width = $svg.clientWidth
$canvas.height = $svg.clientHeight
$canvas.getContext('2d').drawImage(img, 0, 0, $svg.clientWidth, $svg.clientHeight)
return $canvas.toDataURL(`image/${format}`, 1.0)
Step 4
Loading the converted dataURL into a new img element:
const $img = document.createElement('img')
$img.src = dataURL
$holder.appendChild($img)
Here you have a working snippet:
const $svg = document.getElementById('svg-container').querySelector('svg')
const $holder = document.getElementById('img-container')
const $label = document.getElementById('img-format')
const destroyChildren = $element => {
while ($element.firstChild) {
const $lastChild = $element.lastChild ?? false
if ($lastChild) $element.removeChild($lastChild)
}
}
const loadImage = async url => {
const $img = document.createElement('img')
$img.src = url
return new Promise((resolve, reject) => {
$img.onload = () => resolve($img)
$img.onerror = reject
})
}
const convertSVGtoImg = async e => {
const $btn = e.target
const format = $btn.dataset.format ?? 'png'
$label.textContent = format
destroyChildren($holder)
const svgAsXML = (new XMLSerializer()).serializeToString($svg)
const svgData = `data:image/svg+xml,${encodeURIComponent(svgAsXML)}`
const img = await loadImage(svgData)
const $canvas = document.createElement('canvas')
$canvas.width = $svg.clientWidth
$canvas.height = $svg.clientHeight
$canvas.getContext('2d').drawImage(img, 0, 0, $svg.clientWidth, $svg.clientHeight)
const dataURL = await $canvas.toDataURL(`image/${format}`, 1.0)
console.log(dataURL)
const $img = document.createElement('img')
$img.src = dataURL
$holder.appendChild($img)
}
const buttons = [...document.querySelectorAll('[data-format]')]
for (const $btn of buttons) {
$btn.onclick = convertSVGtoImg
}
.wrapper {
display: flex;
flex-flow: row nowrap;
width: 100vw;
}
.images {
display: flex;
flex-flow: row nowrap;
width: 70%;
}
.image {
width: 50%;
display: flex;
flex-flow: row wrap;
justify-content: center;
}
.label {
width: 100%;
text-align: center;
}
<div class="wrapper">
<div class="item images">
<div class="image left">
<div class="label">svg</div>
<div id="svg-container">
<svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" width="200" height="200" viewBox="0 0 248 204">
<path fill="#1d9bf0" d="M221.95 51.29c.15 2.17.15 4.34.15 6.53 0 66.73-50.8 143.69-143.69 143.69v-.04c-27.44.04-54.31-7.82-77.41-22.64 3.99.48 8 .72 12.02.73 22.74.02 44.83-7.61 62.72-21.66-21.61-.41-40.56-14.5-47.18-35.07 7.57 1.46 15.37 1.16 22.8-.87-23.56-4.76-40.51-25.46-40.51-49.5v-.64c7.02 3.91 14.88 6.08 22.92 6.32C11.58 63.31 4.74 33.79 18.14 10.71c25.64 31.55 63.47 50.73 104.08 52.76-4.07-17.54 1.49-35.92 14.61-48.25 20.34-19.12 52.33-18.14 71.45 2.19 11.31-2.23 22.15-6.38 32.07-12.26-3.77 11.69-11.66 21.62-22.2 27.93 10.01-1.18 19.79-3.86 29-7.95-6.78 10.16-15.32 19.01-25.2 26.16z"/>
</svg>
</div>
</div>
<div class="image right">
<div id="img-format" class="label"></div>
<div id="img-container"></div>
</div>
</div>
<div class="item buttons">
<button id="btn-png" data-format="png">PNG</button>
<button id="btn-jpg" data-format="jpeg">JPG</button>
<button id="btn-webp" data-format="webp">WEBP</button>
</div>
</div>
SVG can be converted to PNG in different ways, depending on the input:
If the SVG is available as SVG path (string) data:
create a canvas
create a new Path2D() and pass the svg path data as a parameter
draw the path on the canvas
create an image and use canvas.toDataURL() as its src.
example:
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
let svgText = 'M10 10 h 80 v 80 h -80 Z';
let p = new Path2D('M10 10 h 80 v 80 h -80 Z');
ctx.stroke(p);
let url = canvas.toDataURL();
const img = new Image();
img.src = url;
Note that Path2D is not supported in IE and only partially supported in Edge. A polyfill solves that:
https://github.com/nilzona/path2d-polyfill
Create an svg blob and draw it on a canvas using .drawImage():
make a canvas element
make an svgBlob object from the svg xml
make a url object from domUrl.createObjectURL(svgBlob)
create an Image object and assign the url to the image src
draw the image onto the canvas
get the png data string from the canvas: canvas.toDataURL()
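A minimal sketch of those steps, assuming an inline SVG element and illustrative variable names (not taken from the description linked below):
var svgEl = document.querySelector('svg');
var xml = new XMLSerializer().serializeToString(svgEl);
var svgBlob = new Blob([xml], { type: 'image/svg+xml;charset=utf-8' });
var domUrl = window.URL || window.webkitURL || window;
var url = domUrl.createObjectURL(svgBlob);
var img = new Image();
img.onload = function () {
    var canvas = document.createElement('canvas');
    canvas.width = svgEl.clientWidth;
    canvas.height = svgEl.clientHeight;
    canvas.getContext('2d').drawImage(img, 0, 0);
    domUrl.revokeObjectURL(url);
    var pngData = canvas.toDataURL('image/png'); // the png data string
};
img.src = url;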
Nice description:
https://web.archive.org/web/20200125162931/http://ramblings.mcpher.com:80/Home/excelquirks/gassnips/svgtopng
Note that in IE you will get an exception at the canvas.toDataURL() stage. This is because IE applies stricter security restrictions and treats the canvas as read-only after an image is drawn onto it; all other browsers restrict this only if the image is cross-origin.
Use the canvg JavaScript library. It is a separate library, but it has useful functions, like:
ctx.drawSvg(rawSvg);
var dataURL = canvas.toDataURL();
I recently discovered a couple of image-tracing libraries for JavaScript that are indeed able to build an acceptable approximation of the bitmap, in both size and quality. I'm developing this JavaScript library and CLI:
https://www.npmjs.com/package/svg-png-converter
It provides a unified API for all of them, supports browser and Node, does not depend on the DOM, and includes a command-line tool.
For converting logos/cartoons and similar images it does an excellent job. For photos/realism some tweaking is needed, since the output size can grow a lot.
It has a playground, although right now I'm working on a better, easier-to-use one, since more features have been added:
https://cancerberosgx.github.io/demos/svg-png-converter/playground/#
There are several ways to convert SVG to PNG using the Canvg library.
In my case, I needed to get a PNG blob from an inline SVG.
The library documentation provides an example (see the OffscreenCanvas example).
But this method does not currently work in Firefox. Yes, you can enable the gfx.offscreencanvas.enabled option in the settings, but will every user on the site do this? :)
However, there is another way that will work in Firefox too.
const el = document.getElementById("some-svg"); //this is our inline SVG
var canvas = document.createElement('canvas'); //create a canvas for the SVG render
canvas.width = el.clientWidth; //set canvas sizes
canvas.height = el.clientHeight;
const svg = new XMLSerializer().serializeToString(el); //convert SVG to string
//render SVG inside canvas
const ctx = canvas.getContext('2d');
const v = await Canvg.fromString(ctx, svg);
await v.render();
let canvasBlob = await new Promise(resolve => canvas.toBlob(resolve));
For the last line, thanks to this answer.
Get a data URI from the SVG:
data:image/svg+xml;base64,${btoa(new XMLSerializer().serializeToString(svgElem))}
Prepare an Image.
Create a canvas and use toDataURL to export.
Example
<!-- test data-->
<svg width="400" height="400"><g transform="translate(23.915343915343925,-80.03971756398937)" class="glyph" stroke="#000000" fill="#a0a0a0"><path d="M74.97 108.70L74.97 108.70L100.08 110.77Q93.89 147.91 87.35 179.89L87.35 179.89L148.23 179.89L148.23 194.34Q143.76 277.91 113.84 339.81L113.84 339.81Q144.44 363.54 163.70 382.46L163.70 382.46L146.51 402.75Q128.62 384.18 101.80 361.83L101.80 361.83Q75.32 405.85 34.39 436.80L34.39 436.80L17.20 415.82Q57.43 386.93 82.20 345.66L82.20 345.66Q57.78 326.40 27.86 304.39L27.86 304.39Q44.37 257.96 56.75 203.97L56.75 203.97L19.26 203.97L19.26 179.89L61.90 179.89Q69.47 145.16 74.97 108.70ZM93.20 323.99L93.20 323.99Q118.65 272.06 123.12 203.97L123.12 203.97L82.20 203.97Q69.47 260.03 55.71 297.17L55.71 297.17Q76.01 311.61 93.20 323.99ZM160.26 285.13L160.26 260.37L239.71 260.37L239.71 216.01Q268.25 191.24 294.05 155.48L294.05 155.48L170.58 155.48L170.58 130.71L322.94 130.71L322.94 155.48Q297.49 191.93 265.50 223.92L265.50 223.92L265.50 260.37L337.38 260.37L337.38 285.13L265.50 285.13L265.50 397.59Q265.50 431.64 237.65 431.64L237.65 431.64L187.09 431.64L180.21 407.57Q202.22 407.91 227.67 407.91L227.67 407.91Q239.71 407.91 239.71 390.03L239.71 390.03L239.71 285.13L160.26 285.13Z"></path></g></svg>
<button title="download">svg2png</button>
<script>
const output = {"name": "result.png", "width": 64, "height": 64}
document.querySelector("button").onclick = () => {
const svgElem = document.querySelector("svg")
// const uriData = `data:image/svg+xml;base64,${btoa(svgElem.outerHTML)}` // it may fail.
const uriData = `data:image/svg+xml;base64,${btoa(new XMLSerializer().serializeToString(svgElem))}`
const img = new Image()
img.src = uriData
img.onload = () => {
const canvas = document.createElement("canvas");
[canvas.width, canvas.height] = [output.width, output.height]
const ctx = canvas.getContext("2d")
ctx.drawImage(img, 0, 0, output.width, output.height)
// 👇 download
const a = document.createElement("a")
const quality = 1.0 // https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/imageSmoothingQuality
a.href = canvas.toDataURL("image/png", quality)
a.download = output.name
a.append(canvas)
a.click()
a.remove()
}
}
</script>
Here are my 2 cents. Somehow the download anchor tag is not working as expected in the code snippet; however, it works fine in Chrome.
Here is a working jsFiddle.
const waitForImage = imgElem => new Promise(resolve => imgElem.complete ? resolve() : imgElem.onload = imgElem.onerror = resolve);
const svgToImgDownload = ext => {
if (!['png', 'jpg', 'webp'].includes(ext))
return;
const _svg = document.querySelector("#svg_container").querySelector('svg');
const xmlSerializer = new XMLSerializer();
let _svgStr = xmlSerializer.serializeToString(_svg);
const img = document.createElement('img');
img.src = 'data:image/svg+xml;base64,' + window.btoa(_svgStr);
waitForImage(img)
.then(_ => {
const canvas = document.createElement('canvas');
canvas.width = _svg.clientWidth;
canvas.height = _svg.clientHeight;
canvas.getContext('2d').drawImage(img, 0, 0, _svg.clientWidth, _svg.clientHeight);
return canvas.toDataURL('image/' + (ext == 'jpg' ? 'jpeg' : ext), 1.0);
})
.then(dataURL => {
console.log(dataURL);
document.querySelector("#img_download_btn").innerHTML = `Download`;
})
.catch(console.error);
};
document.querySelector('#map2Png').addEventListener('click', _ => svgToImgDownload('png'));
document.querySelector('#map2Jpg').addEventListener('click', _ => svgToImgDownload('jpg'));
document.querySelector('#map2Webp').addEventListener('click', _ => svgToImgDownload('webp'));
<div id="svg_container" style="float: left; width: 50%">
<svg width="200" height="200" viewBox="-100 -100 200 200">
<circle cx="0" cy="20" r="70" fill="#D1495B" />
<circle cx="0" cy="-75" r="12" fill="none" stroke="#F79257" stroke-width="2" />
<rect x="-17.5" y="-65" width="35" height="20" fill="#F79257" />
</svg>
</div>
<div>
<button id="map2Png">PNG</button>
<button id="map2Jpg">JPG</button>
<button id="map2Webp">WEBP</button>
</div>
<div id="img_download_btn"></div>

Using MediaPipe Selfie Segmentation JavaScript to pass a user selfie as a texture in Networked-Aframe for multiplayer experiences

Well, my goal is to make a multiplayer 3D environment where the people are represented by cubes that have their selfies, without the background, as textures. It is a kind of cheap virtual production without a chroma-key background. The background is removed with MediaPipe Selfie Segmentation. The issue is that instead of seeing the other player's texture on the cube (P1 should see P2, and P2 should see P1), each one sees his own selfie. This means that P1 sees P1 and P2 sees P2, which is bad.
Live demo: https://vrodos-multiplaying.iti.gr/plain_aframe_mediapipe_testbed.html
Instructions: You should use two machines to test it (desktops, laptops or mobiles). Use only Chrome, as MediaPipe does not work in other browsers. If the webpage jams, reload it (MediaPipe is sometimes sticky). At least two machines should load the webpage in order to start the multiplayer environment.
Code:
<html>
<head>
<script src="https://aframe.io/releases/1.2.0/aframe.min.js"></script>
<!-- Selfie Segmentation of Mediapipe -->
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/#mediapipe/control_utils#0.6/control_utils.css" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/#mediapipe/camera_utils#0.3/camera_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/#mediapipe/control_utils#0.6/control_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/#mediapipe/drawing_utils#0.3/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/#mediapipe/selfie_segmentation#0.1/selfie_segmentation.js" crossorigin="anonymous"></script>
<!-- Networked A-frame -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.3.0/socket.io.slim.js"></script>
<script src="/easyrtc/easyrtc.js"></script>
<script src="https://unpkg.com/networked-aframe/dist/networked-aframe.min.js"></script>
</head>
<body>
<a-scene networked-scene="
adapter: easyrtc;
video: true;
debug: true;
connectOnLoad: true;">
<a-assets>
<template id="avatar-template">
<a-box material="alphaTest: 0.5; transparent: true; side: both;"
width="1"
height="1"
position="0 0 0" rotation="0 0 0"
networked-video-source-mediapiped></a-box>
</template>
</a-assets>
<a-entity id="player"
networked="template:#avatar-template;attachTemplateToLocal:false;"
camera wasd-controls look-controls>
</a-entity>
<a-plane position="0 -2 -4" rotation="-90 0 0" width="4" height="4" color="#7BC8A4" ></a-plane>
<a-sky color="#777"></a-sky>
</a-scene>
</body>
<script>
// Networked Aframe : Register new component for streaming the Selfie-Segmentation video stream
AFRAME.registerComponent('networked-video-source-mediapiped', {
schema: {
},
dependencies: ['material'],
init: function () {
this.videoTexture = null;
this.video = null;
this.stream = null;
this._setMediaStream = this._setMediaStream.bind(this);
NAF.utils.getNetworkedEntity(this.el).then((networkedEl) => {
const ownerId = networkedEl.components.networked.data.owner;
if (ownerId) {
NAF.connection.adapter.getMediaStream(ownerId, "video")
.then(this._setMediaStream)
.catch((e) => NAF.log.error(`Error getting media stream for ${ownerId}`, e));
} else {
// Correctly configured local entity, perhaps do something here for enabling debug audio loopback
}
});
},
_setMediaStream(newStream) {
if(!this.video) {
this.setupVideo();
}
if(newStream != this.stream) {
if (this.stream) {
this._clearMediaStream();
}
if (newStream) {
this.video.srcObject = canvasElement.captureStream(30);
this.videoTexture = new THREE.VideoTexture(this.video);
this.videoTexture.format = THREE.RGBAFormat;
// Mesh to send
const mesh = this.el.getObject3D('mesh');
mesh.material.map = this.videoTexture;
mesh.material.needsUpdate = true;
}
this.stream = newStream;
}
},
_clearMediaStream() {
this.stream = null;
if (this.videoTexture) {
if (this.videoTexture.image instanceof HTMLVideoElement) {
// Note: this.videoTexture.image === this.video
const video = this.videoTexture.image;
video.pause();
video.srcObject = null;
video.load();
}
this.videoTexture.dispose();
this.videoTexture = null;
}
},
remove: function() {
this._clearMediaStream();
},
setupVideo: function() {
if (!this.video) {
const video = document.createElement('video');
video.setAttribute('autoplay', true);
video.setAttribute('playsinline', true);
video.setAttribute('muted', true);
this.video = video;
}
}
});
// ----- Mediapipe ------
const controls = window;
//const mpSelfieSegmentation = window;
const examples = {
images: [],
// {name: 'name', src: 'https://url.com'},
videos: [],
};
const fpsControl = new controls.FPS();
let activeEffect = 'background';
const controlsElement = document.createElement('control-panel');
var canvasElement = document.createElement('canvas');
canvasElement.height= 1000;
canvasElement.width = 1000;
var canvasCtx = canvasElement.getContext('2d');
// --------
function drawResults(results) {
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
canvasCtx.drawImage(results.segmentationMask, 0, 0, canvasElement.width, canvasElement.height);
canvasCtx.globalCompositeOperation = 'source-in';
canvasCtx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);
}
const selfieSegmentation = new SelfieSegmentation({
locateFile: (file) => {
console.log(file);
return `https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation@0.1/${file}`;
}
});
selfieSegmentation.onResults(drawResults);
// -------------
new controls
.ControlPanel(controlsElement, {
selfieMode: true,
modelSelection: 1,
effect: 'background',
})
.add([
new controls.StaticText({title: 'MediaPipe Selfie Segmentation'}),
fpsControl,
new controls.Toggle({title: 'Selfie Mode', field: 'selfieMode'}),
new controls.SourcePicker({
onSourceChanged: () => {
selfieSegmentation.reset();
},
onFrame: async (input, size) => {
const aspect = size.height / size.width;
let width, height;
if (window.innerWidth > window.innerHeight) {
height = window.innerHeight;
width = height / aspect;
} else {
width = window.innerWidth;
height = width * aspect;
}
canvasElement.width = width;
canvasElement.height = height;
await selfieSegmentation.send({image: input});
},
examples: examples
}),
new controls.Slider({
title: 'Model Selection',
field: 'modelSelection',
discrete: ['General', 'Landscape'],
}),
new controls.Slider({
title: 'Effect',
field: 'effect',
discrete: {'background': 'Background', 'mask': 'Foreground'},
}),
])
.on(x => {
const options = x;
//videoElement.classList.toggle('selfie', options.selfieMode);
activeEffect = x['effect'];
selfieSegmentation.setOptions(options);
});
</script>
</html>
Screenshot:
Here Player 1 sees Player 1's stream instead of Player 2's stream (Grrrrrr):
Well, I found it. The problem was that a MediaStream can have many video tracks. See my answer here:
https://github.com/networked-aframe/networked-aframe/issues/269
Unfortunately the networked-aframe EasyRtcAdapter does not support multiple MediaStreams, but it is easy to add another video track and then get video track [1] instead of video track [0]. I should make a special EasyRtcAdapter to avoid having two video tracks and to avoid overstressing the bandwidth.
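A rough sketch of that workaround, with hypothetical variable names (this is not the actual networked-aframe adapter code): append the segmented canvas as a second video track on the outgoing stream, then pick the second track on the receiving side.
// Sender: add the canvas-captured track alongside the camera track.
var canvasTrack = canvasElement.captureStream(30).getVideoTracks()[0];
cameraStream.addTrack(canvasTrack); // cameraStream is the stream handed to the adapter
// Receiver: wrap the second video track (the segmented one) in its own stream.
function pickSegmentedStream(remoteStream) {
    var tracks = remoteStream.getVideoTracks();
    return new MediaStream([tracks[1] || tracks[0]]); // fall back to the first track
}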

Recorded audio distorted in Safari 11 when using visualization with WebRTC

I've created a demo to show this; it seems that when I show a visualization, the recorded audio gets really crunchy. Is there any way I can fix this? It works in Chrome and Firefox; Safari is the only offender.
<style>
html, body {
margin: 0!important;
padding: 0!important;
overflow: hidden!important;
width: 100%;
}
</style>
<title>Audio Recording | RecordRTC</title>
<h1>Simple Audio Recording using RecordRTC</h1>
<meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0">
<br>
<button id="btn-start-recording">Start Recording</button>
<button id="btn-stop-recording" disabled>Stop Recording</button>
<label><input id="visualizer" type="checkbox" checked/>Show Visualizer</label>
<hr>
<canvas id="audio-canvas"></canvas>
<div><audio controls autoplay></audio></div>
<script src="https://cdn.webrtc-experiment.com/RecordRTC.js"></script>
<script>
var audio = document.querySelector('audio');
function captureMicrophone(callback) {
if(microphone) {
callback(microphone);
return;
}
if(typeof navigator.mediaDevices === 'undefined' || !navigator.mediaDevices.getUserMedia) {
alert('This browser does not supports WebRTC getUserMedia API.');
if(!!navigator.getUserMedia) {
alert('This browser seems supporting deprecated getUserMedia API.');
}
}
navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: false
}
}).then(function(mic) {
callback(mic);
}).catch(function(error) {
alert('Unable to capture your microphone. Please check console logs.');
console.error(error);
});
}
function replaceAudio(src) {
var newAudio = document.createElement('audio');
newAudio.controls = true;
if(src) {
newAudio.src = src;
}
var parentNode = audio.parentNode;
parentNode.innerHTML = '';
parentNode.appendChild(newAudio);
audio = newAudio;
}
function stopRecordingCallback() {
replaceAudio(URL.createObjectURL(recorder.getBlob()));
btnStartRecording.disabled = false;
setTimeout(function() {
if(!audio.paused) return;
setTimeout(function() {
if(!audio.paused) return;
audio.play();
}, 1000);
audio.play();
}, 300);
audio.play();
if(microphone) {
microphone.stop();
microphone = null;
}
}
var recorder; // globally accessible
var microphone;
var btnStartRecording = document.getElementById('btn-start-recording');
var btnStopRecording = document.getElementById('btn-stop-recording');
var canvas = document.getElementById('audio-canvas');
var visualizerCheckbox = document.getElementById('visualizer');
btnStartRecording.onclick = function() {
this.disabled = true;
this.style.border = '';
this.style.fontSize = '';
if (!microphone) {
captureMicrophone(function(mic) {
microphone = mic;
replaceAudio();
audio.muted = true;
audio.play();
btnStartRecording.disabled = false;
btnStartRecording.style.border = '1px solid red';
btnStartRecording.style.fontSize = '150%';
alert('Please click startRecording button again. First time we tried to access your microphone. Now we will record it.');
});
return;
}
replaceAudio();
audio.muted = true;
audio.play();
if(visualizerCheckbox.checked){
initScope(canvas);
}
var options = {
type: 'audio',
numberOfAudioChannels: 2,
checkForInactiveTracks: true,
bufferSize: 16384,
sampleRate: 48000
};
if(recorder) {
recorder.destroy();
recorder = null;
}
recorder = RecordRTC(microphone, options);
recorder.startRecording();
btnStopRecording.disabled = false;
};
btnStopRecording.onclick = function() {
this.disabled = true;
recorder.stopRecording(stopRecordingCallback);
};
function initScope(audioScopeCanvas) {
if (audioScopeCanvas) {
var analyser;
var mic;
var javascriptNode;
var canvasContext;
audioContext = new AudioContext(); // NEW!!
analyser = audioContext.createAnalyser();
mic = audioContext.createMediaStreamSource(microphone);
javascriptNode = audioContext.createScriptProcessor(2048, 1, 1);
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 1024;
mic.connect(analyser);
analyser.connect(javascriptNode);
javascriptNode.connect(audioContext.destination);
//canvasContext = $("#canvas")[0].getContext("2d");
canvasContext = audioScopeCanvas.getContext("2d");
javascriptNode.onaudioprocess = function () {
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var values = 0;
var length = array.length;
for (var i = 0; i < length; i++) {
values += array[i];
}
var average = values / length;
canvasContext.clearRect(0, 0, 60, 130);
canvasContext.fillStyle = '#00ff00';
canvasContext.fillRect(0, 130 - average, 25, 130);
};
}
}
</script>
You can play with it here:
http://andrew.colchagoff.com/safari-record.html
I'm using RecordRTC.

Return data using a marker in Google Maps v3

Is it possible to return the street address, province, zip code (postal code), city and country using a marker in Google Maps v3?
Sometimes not all of the data is returned when using formatted_address.
Is there a way I can return something specific? E.g. return[1].city, return[1].country, return[1].Address...
Yes.
I wrote a function that reads components from the geocoder results.
Here is a full example
<!doctype html>
<html>
<head>
<style>
#map-canvas {
width: 500px;
height: 400px;
}
</style>
<script type="text/javascript" src="http://maps.googleapis.com/maps/api/js?sensor=false"></script>
<script>
function init() {
var center = new google.maps.LatLng(50.845464, 4.3571121); // Brussels
var geocoder;
geocoder = new google.maps.Geocoder();
var map = new google.maps.Map(
document.getElementById('map-canvas'), {
center: center,
zoom: 15,
mapTypeId: google.maps.MapTypeId.ROADMAP
});
var marker = new google.maps.Marker({
draggable: true,
title: "Drag me to see the address components",
map: map,
position: center
});
google.maps.event.addListener(marker, 'dragend', function(e) {
document.getElementById('display').innerHTML = '';
var pos = marker.getPosition();
// get geoposition
geocoder.geocode({
latLng: pos
}, function(responses) {
if (responses && responses.length > 0) {
// get postal code
var postal_code = addresComponent('postal_code', responses[0])
if (postal_code) {
document.getElementById('display').innerHTML += '<div>Postal code: ' + postal_code + '</div>';
}
// get street
var street = addresComponent('route', responses[0])
if (street) {
document.getElementById('display').innerHTML += '<div>Street: ' + street + '</div>';
}
// street number (number of the house in the street)
var street_number = addresComponent('street_number', responses[0])
if (street_number) {
document.getElementById('display').innerHTML += '<div>Street number: ' + street_number + '</div>';
}
}
});
});
}
/**
* geocodeResponse is an object full of address data.
* This function will "fish" for the right value
*
* example: type = 'postal_code' =>
* geocodeResponse.address_components[5].types[1] = 'postal_code'
* geocodeResponse.address_components[5].long_name = '1000'
*
* type = 'route' =>
* geocodeResponse.address_components[1].types[1] = 'route'
* geocodeResponse.address_components[1].long_name = 'Wetstraat'
*/
function addresComponent(type, geocodeResponse) {
for(var i=0; i < geocodeResponse.address_components.length; i++) {
for (var j=0; j < geocodeResponse.address_components[i].types.length; j++) {
if (geocodeResponse.address_components[i].types[j] == type) {
return geocodeResponse.address_components[i].long_name;
}
}
}
return '';
}
</script>
</head>
<body onload="init()">
<div id="map-canvas"></div>
<div id="display"></div>
<h3>drag the marker - see some address components</h3>
</body>
</html>
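The same helper can pull the other components the question asks about, from inside the same geocode callback; the type names below come from the Google Geocoding documentation (the variable names are illustrative):
var city     = addresComponent('locality', responses[0]);
var country  = addresComponent('country', responses[0]);
var province = addresComponent('administrative_area_level_1', responses[0]);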

Multiple Bing Maps pushpins from SQL not showing in Firefox & Chrome but showing in IE

I'm displaying a Bing Maps map (v7) on my WebMatrix 2 website with a series of pushpins & infoboxes drawn from a SQL Express database using a JSON query.
While the map appears in all 3 browsers I'm testing (IE, FF & Chrome), the pushpins are sometimes not showing in FF & Chrome, particularly if I refresh with Ctrl+F5.
This is my first JSON and Bing Maps app, so I expect there are a few mistakes.
Any suggestions on how to improve the code and get display consistency?
@{
Layout = "~/_MapLayout.cshtml";
}
<script type="text/javascript" src="~/Scripts/jquery-1.9.1.min.js"></script>
<script type="text/javascript" src="http://ecn.dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=7.0"></script>
<link rel="StyleSheet" href="infoboxStyles.css" type="text/css">
<script type="text/javascript">
var map = null;
var pinLayer, pinInfobox;
var mouseover;
var pushpinFrameHTML = '<div class="infobox"><a class="infobox_close" href="javascript:closeInfobox()"><img src="/Images/close2.jpg" /></a><div class="infobox_content">{content}</div></div><div class="infobox_pointer"><img src="images/pointer_shadow.png"></div>';
var pinLayer = new Microsoft.Maps.EntityCollection();
var infoboxLayer = new Microsoft.Maps.EntityCollection();
function getMap() {
map = new Microsoft.Maps.Map(document.getElementById('map'), {
credentials: "my-key",
zoom: 4,
center: new Microsoft.Maps.Location(-25, 135),
mapTypeId: Microsoft.Maps.MapTypeId.road
});
pinInfobox = new Microsoft.Maps.Infobox(new Microsoft.Maps.Location(0, 0), { visible: false });
AddData();
}
$(function AddData() {
$.getJSON('/ListSchools', function (data) {
var schools = data;
$.each(schools, function (index, school) {
for (var i = 0; i < schools.length; i++) {
var pinLocation = new Microsoft.Maps.Location(school.SchoolLat, school.SchoolLon);
var NewPin = new Microsoft.Maps.Pushpin(pinLocation);
NewPin.title = school.SchoolName;
NewPin.description = "-- Learn More --";
pinLayer.push(NewPin); //add pushpin to pinLayer
Microsoft.Maps.Events.addHandler(NewPin, 'mouseover', displayInfobox);
}
});
infoboxLayer.push(pinInfobox);
map.entities.push(pinLayer);
map.entities.push(infoboxLayer);
});
})
function displayInfobox(e) {
if (e.targetType == "pushpin") {
var pin = e.target;
var html = "<span class='infobox_title'>" + pin.title + "</span><br/>" + pin.description;
pinInfobox.setOptions({
visible: true,
offset: new Microsoft.Maps.Point(-33, 20),
htmlContent: pushpinFrameHTML.replace('{content}', html)
});
//set location of infobox
pinInfobox.setLocation(pin.getLocation());
}
}
function closeInfobox() {
pinInfobox.setOptions({ visible: false });
}
function getCurrentLocation() {
var geoLocationProvider = new Microsoft.Maps.GeoLocationProvider(map);
geoLocationProvider.getCurrentPosition();
}
</script>
<body onload="getMap();">
<div id="map" style="position:relative; width:800px; height:600px;"></div>
<div>
<input type="button" value="Find Nearest Schools" onclick="getCurrentLocation();" />
</div>
</body>
The JSON file is simply
@{
var db = Database.Open("StarterSite");
var sql = @"SELECT * FROM Schools WHERE SchoolLon != ' ' AND SchoolLon != 'null' ";
var data = db.Query(sql);
Json.Write(data, Response.Output);
}
Add your pinLayer, infobox, and infoboxLayer before calling the AddData function and see if that makes a difference. Also verify that school.SchoolLat and school.SchoolLon are numbers and not string versions of numbers; if they are strings, use parseFloat to turn them into numbers. Other than that, everything looks fine.
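A minimal sketch of those two suggestions, adapted from the question's code (not tested against the original page):
function getMap() {
    map = new Microsoft.Maps.Map(document.getElementById('map'), { /* ...same options as before... */ });
    pinInfobox = new Microsoft.Maps.Infobox(new Microsoft.Maps.Location(0, 0), { visible: false });
    // Attach the layers to the map before any pins arrive.
    infoboxLayer.push(pinInfobox);
    map.entities.push(pinLayer);
    map.entities.push(infoboxLayer);
    AddData();
}
// Inside the $.each loop, coerce the coordinates in case they arrive as strings:
var pinLocation = new Microsoft.Maps.Location(
    parseFloat(school.SchoolLat),
    parseFloat(school.SchoolLon)
);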