Is it possible for Azure Kinect Body Tracking to accept depth data from a different depth camera? - kinect-v2

This is being done in a Unity project.
I'm currently experimenting with creating a new Azure Kinect Capture object and filling it with data from the color, depth, and IR frames produced by the K2. Since an Azure Kinect camera isn't supposed to be in use at the time, I create a fake Azure Kinect Calibration to use when creating an Azure Kinect Body Tracking Tracker.
I'm running into a problem: if the data pulled from the Kinect v2 is successfully enqueued, the project hangs, and if a call is made to pop the enqueued data, the project hangs as soon as it's run. I've added timeouts to the enqueue and pop, which fixed the freezing; however, the popped Body Tracking Frame object never contains a body. I've set up a scene where the depth data is visualized, to make sure it's not being distorted or obscured, and it looks fine.
Before I continue trying to make this work, I wanted to see if I was missing something here or if what I'm doing is even possible.
The fake calibration:
Calibration cal = new Calibration {
DepthCameraCalibration = new CameraCalibration {
Extrinsics = new Extrinsics {
Rotation = new float[] { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f },
Translation = new float[] { 0.0f, 0.0f, 0.0f }
},
Intrinsics = new Intrinsics {
Type = CalibrationModelType.BrownConrady,
ParameterCount = 14,
Parameters = new float[] { 264.902374f, 261.016541f, 251.993011f, 252.0128f, 0.5496079f, -0.0305904336f, -0.00340628251f, 0.893285751f, 0.07668319f, -0.01748066f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }
},
ResolutionWidth = 512,
ResolutionHeight = 512,
MetricRadius = 1.73999977f
},
ColorCameraCalibration = new CameraCalibration {
Extrinsics = new Extrinsics {
Rotation = new float[] { 0.9999973f, 0.00189682352f, -0.00130836014f, -0.00179401657f, 0.997216046f, 0.07454452f, 0.00144611555f, -0.07454198f, 0.9972168f },
Translation = new float[] { -32.1138039f, -2.46932817f, 3.97587371f }
},
Intrinsics = new Intrinsics {
Type = CalibrationModelType.BrownConrady,
ParameterCount = 14,
Parameters = new float[] { 957.2569f, 551.9336f, 913.142334f, 913.1438f, 0.4421505f, -2.83680415f, 1.73018765f, 0.32017225f, -2.644007f, 1.643955f, 0.0f, 0.0f, -0.000281378743f, 0.000288581447f, 0.0f }
},
ResolutionWidth = 1920,
ResolutionHeight = 1080,
MetricRadius = 1.7f
},
DeviceExtrinsics = new Extrinsics[] { //Device Extrinsics calibration chunk
new Extrinsics(){ Rotation = new float[] { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }, Translation = new float[] { 0.0f, 0.0f, 0.0f } },
new Extrinsics(){ Rotation = new float[] { 0.9999973f, 0.00189682352f, -0.00130836014f, -0.00179401657f, 0.997216046f, 0.07454452f, 0.00144611555f, -0.07454198f, 0.9972168f }, Translation = new float[] { -32.1138039f, -2.46932817f, 3.97587371f } },
new Extrinsics(){ Rotation = new float[] { -0.000347044057f, 0.110655256f, -0.9938588f, -0.999971569f, -0.007524097f, -0.000488546968f, -0.00753195f, 0.9938304f, 0.110654727f }, Translation = new float[] { 0.0f, 0.0f, 0.0f } },
new Extrinsics(){ Rotation = new float[] { 0.00211483915f, 0.106267117f, -0.994335353f, -0.999981642f, -0.005419674f, -0.00270606228f, -0.00567653868f, 0.994322836f, 0.1062537f }, Translation = new float[] { -51.137455f, 3.33257771f, 0.7745425f } },
new Extrinsics(){ Rotation = new float[] { 0.9999973f, -0.00179401657f, 0.00144611555f, 0.00189682352f, 0.997216046f, -0.07454198f, -0.00130836014f, 0.07454452f, 0.9972168f }, Translation = new float[] { 32.10354f, 2.81973743f, -3.82274985f } },
new Extrinsics(){ Rotation = new float[] { 0.99999994f, 0.0f, 0.0f, 0.0f, 0.99999994f, 0.0f, 0.0f, 0.0f, 1.0f }, Translation = new float[] { 0.0f, 0.0f, 0.0f } },
new Extrinsics(){ Rotation = new float[] { 0.00116317568f, 0.0362610966f, -0.9993417f, -0.9999825f, -0.005745603f, -0.00137240067f, -0.00579158543f, 0.9993258f, 0.03625378f }, Translation = new float[] { 4.100151f, -32.1219749f, 2.13753319f } },
new Extrinsics(){ Rotation = new float[] { 0.00361735234f, 0.0318452343f, -0.999486268f, -0.9999857f, -0.00381232449f, -0.00374062685f, -0.0039294865f, 0.9994855f, 0.0318309739f }, Translation = new float[] { -46.96882f, -28.77531f, 2.98985362f } },
new Extrinsics(){ Rotation = new float[] { -0.000347044057f, -0.999971569f, -0.00753195f, 0.110655256f, -0.007524097f, 0.9938304f, -0.9938588f, -0.000488546968f, 0.110654727f }, Translation = new float[] { 0.0f, 0.0f, 0.0f } },
new Extrinsics(){ Rotation = new float[] { 0.00116317568f, -0.9999825f, -0.00579158543f, 0.0362610966f, -0.005745603f, 0.9993258f, -0.9993417f, -0.00137240067f, 0.03625378f }, Translation = new float[] { -32.1138039f, -2.46932817f, 3.97587371f } },
new Extrinsics(){ Rotation = new float[] { 1.00000012f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.00000012f }, Translation = new float[] { 0.0f, 0.0f, 0.0f } },
new Extrinsics(){ Rotation = new float[] { 0.999987245f, -0.00242856354f, -0.0044323504f, 0.002436766f, 0.9999953f, 0.00184613629f, 0.00442783535f, -0.00185691414f, 0.9999885f }, Translation = new float[] { -51.137455f, 3.33257771f, 0.7745425f } },
new Extrinsics(){ Rotation = new float[] { 0.00211483915f, -0.999981642f, -0.00567653868f, 0.106267117f, -0.005419674f, 0.994322836f, -0.994335353f, -0.00270606228f, 0.1062537f }, Translation = new float[] { 3.44506049f, 4.682146f, -50.92106f } },
new Extrinsics(){ Rotation = new float[] { 0.00361735234f, -0.9999857f, -0.0039294865f, 0.0318452343f, -0.00381232449f, 0.9994855f, -0.999486268f, -0.00374062685f, 0.0318309739f }, Translation = new float[] { -28.5932484f, -1.602283f, -47.1475f } },
new Extrinsics(){ Rotation = new float[] { 0.999987245f, 0.002436766f, 0.00442783535f, -0.00242856354f, 0.9999953f, -0.00185691414f, -0.0044323504f, 0.00184613629f, 0.9999885f }, Translation = new float[] { 51.125248f, -3.45531416f, -1.0073452f } },
new Extrinsics(){ Rotation = new float[] { 0.99999994f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f }, Translation = new float[] { 0.0f, 0.0f, 0.0f } }
},
DepthMode = DepthMode.WFOV_2x2Binned,
ColorResolution = ColorResolution.R1080p
};
tracker = Tracker.Create(cal, new TrackerConfiguration() {
SensorOrientation = SensorOrientation.Default,
ProcessingMode = TrackerProcessingMode.Gpu,
GpuDeviceId = 0
});
The depth-into-body-tracking process:
timeSinceStart = DateTime.Now.Subtract(timeOfStart);
colorFrame = colorFrameReader.AcquireLatestFrame();
depthFrame = depthFrameReader.AcquireLatestFrame();
irFrame = infraredFrameReader.AcquireLatestFrame();
KinectCapture capture = new KinectCapture();
if (colorFrame != null) {
Image colorImage = new Image(ImageFormat.ColorBGRA32, colorFrame.FrameDescription.Width, colorFrame.FrameDescription.Height) {
DeviceTimestamp = timeSinceStart
};
colorImage.SystemTimestampNsec = (long)timeSinceStart.TotalMilliseconds * 1000000;
if (colorBuffer == null) {
FrameDescription description = colorFrame.ColorFrameSource.FrameDescription;
// 4 bytes per pixel, since the data is converted to BGRA below.
colorBuffer = new byte[4 * description.Width * description.Height];
}
// Convert the K2's raw color data to BGRA so it matches ImageFormat.ColorBGRA32.
colorFrame.CopyConvertedFrameDataToArray(colorBuffer, ColorImageFormat.Bgra);
for (int i = 0; i < 1920 * 1080; i++) {
colorImage.SetPixel<uint>(i / 1920, i % 1920, BitConverter.ToUInt32(colorBuffer, i * 4));
}
capture.Color = colorImage;
colorFrame.Dispose();
}
if (depthFrame != null) {
Image depthImage = new Image(ImageFormat.Depth16, 512, 512);
depthImage.DeviceTimestamp = timeSinceStart;
depthImage.SystemTimestampNsec = (long)timeSinceStart.TotalMilliseconds * 1000000;
if (depthBuffer == null) {
var description = depthFrame.DepthFrameSource.FrameDescription;
depthBuffer = new ushort[description.Width * description.Height];
}
depthFrame.CopyFrameDataToArray(depthBuffer);
// The K2 depth frame is 512x424; pad the rest of the 512x512 image with zeros.
for (int i = 0; i < 512 * 512; i++) {
ushort value = i < depthBuffer.Length ? depthBuffer[i] : (ushort)0;
depthImage.SetPixel<ushort>(i / 512, i % 512, value);
}
capture.Depth = depthImage;
depthFrame.Dispose();
}
if (irFrame != null) {
Image irImage = new Image(ImageFormat.IR16, 512, 512);
irImage.DeviceTimestamp = timeSinceStart;
irImage.SystemTimestampNsec = (long)timeSinceStart.TotalMilliseconds * 1000000;
if (irBuffer == null) {
var description = irFrame.InfraredFrameSource.FrameDescription;
irBuffer = new ushort[description.Width * description.Height];
}
irFrame.CopyFrameDataToArray(irBuffer);
// As with depth, the 512x424 IR frame is padded out to 512x512 with zeros.
for (int i = 0; i < 512 * 512; i++) {
ushort value = i < irBuffer.Length ? irBuffer[i] : (ushort)0;
irImage.SetPixel<ushort>(i / 512, i % 512, value);
}
capture.IR = irImage;
irFrame.Dispose();
}
capture.Temperature = 30.0f;
try {
if(capture.Color != null && capture.Depth != null && capture.IR != null)
{
tracker.EnqueueCapture(capture, new TimeSpan(0, 0, 0, 0, 50));
Debug.Log("Successful Enqueue");
}
} catch (Exception ex) {
Debug.Log($"Failed to enqeue\n{ex.Message}");
}
try {
kFrame = tracker.PopResult(new TimeSpan(0, 0, 0, 15));
Debug.Log("Bodies in frame: " + kFrame.NumberOfBodies);
}
catch (Exception ex) {
Debug.Log($"Failed to pop from queue\n{ex.Message}");
}

The Body Tracking SDK is designed to work with Azure Kinect devices; it is not intended to consume depth data from other cameras.
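For comparison, here is the supported flow, where the calibration and the captures both come from a physical Azure Kinect rather than a hand-built Calibration. This is only a minimal sketch (plain C#, outside Unity; the depth mode, color resolution, and timeouts are illustrative):
using System;
using Microsoft.Azure.Kinect.Sensor;
using Microsoft.Azure.Kinect.BodyTracking;

// Open a real Azure Kinect and start its cameras.
using (Device device = Device.Open(0))
{
    device.StartCameras(new DeviceConfiguration
    {
        DepthMode = DepthMode.WFOV_2x2Binned,
        ColorResolution = ColorResolution.R1080p,
        ColorFormat = ImageFormat.ColorBGRA32,
        CameraFPS = FPS.FPS30
    });

    // The calibration is read from the device instead of being constructed by hand.
    Calibration calibration = device.GetCalibration(DepthMode.WFOV_2x2Binned, ColorResolution.R1080p);

    using (Tracker tracker = Tracker.Create(calibration, new TrackerConfiguration
    {
        SensorOrientation = SensorOrientation.Default,
        ProcessingMode = TrackerProcessingMode.Gpu,
        GpuDeviceId = 0
    }))
    using (Capture capture = device.GetCapture()) // depth and IR that match the calibration
    {
        tracker.EnqueueCapture(capture, TimeSpan.FromMilliseconds(50));
        using (Frame frame = tracker.PopResult(TimeSpan.FromSeconds(1)))
        {
            Console.WriteLine($"Bodies in frame: {frame.NumberOfBodies}");
        }
    }
}
Because the calibration, depth, and IR data all come from the same physical sensor here, the tracker's assumptions about the depth geometry hold; with a hand-built calibration and Kinect v2 frames, they may not.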

Related

Print value of an array using three.js TextGeometry

How can I print the values of an array using three.js TextGeometry? I'm trying the following code but get no output.
for(let i=0;i<=4;i++)
{
let arr = [1,2,3,4,5,6,7,8];
let char = arr[i];
let loader = new THREE.FontLoader();
let font = loader.parse(fontJSON);
let geometry = new THREE.TextBufferGeometry(char ,{font : font , size : 1 , height : 0.1 });
let material = new THREE.MeshBasicMaterial({ color : 0xffffff });
let text = new THREE.Mesh(geometry , material);
text.position.set(i,0,0);
scene.add(text);
}
You have to make sure to provide a string to TextBufferGeometry, not a number. You can easily ensure this by calling toString() on your char variable. I've refactored your code a bit to show you a complete example.
let camera, scene, renderer;
init();
animate();
function init() {
camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 0.01, 10);
camera.position.z = 8;
scene = new THREE.Scene();
const material = new THREE.MeshBasicMaterial({
color: 0xffffff
});
const arr = [1, 2, 3, 4, 5, 6, 7, 8];
const loader = new THREE.FontLoader();
loader.load('https://threejs.org/examples/fonts/helvetiker_regular.typeface.json', (font) => {
for (let i = 0; i <= 4; i++) {
const char = arr[i];
const geometry = new THREE.TextBufferGeometry(char.toString(), {
font: font,
size: 1,
height: 0.1
});
const text = new THREE.Mesh(geometry, material);
text.position.set(i, 0, 0);
scene.add(text);
}
});
renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
}
function animate() {
requestAnimationFrame(animate);
renderer.render(scene, camera);
}
body {
margin: 0;
}
canvas {
display: block;
}
<script src="https://cdn.jsdelivr.net/npm/three#0.117.1/build/three.js"></script>

webgl texture on expo gives black screen

I'm trying WebGL for the first time. The thing is, I'm working on Expo with the expo-gl package, aiming to build a filter component for photo editing. So far, I have been able to create a context successfully. When creating the shaders everything works fine (I can render a triangle, two triangles, etc.).
The problem comes when I try to load an image as a texture: I get no errors, but all I get is a black screen on the emulator and the phone (both Android).
I have done checks for power-of-2 images and image.onload, and I did a console.log for gl.texImage2D but got undefined. I would really appreciate any insight or help with this issue.
I divided what I think are the relevant pieces of code for this issue into 4 parts.
1. Shaders (template literals):
const vertexShaderSource = `
attribute vec2 a_texCoord;
attribute vec4 a_position;
varying vec2 v_texCoord;
void main() {
gl_Position = a_position;
v_texCoord = a_texCoord;
}
`;
const fragmentShaderSource = `
precision mediump float;
uniform sampler2D u_image;
varying vec2 v_texCoord;
void main() {
gl_FragColor = texture2D(u_image, v_texCoord);
}
`;
2. React Native (Expo) state and lifecycle:
export default class App extends React.Component {
state = {
ready: false,
image: null,
};
componentDidMount() {
(async () => {
const image = Asset.fromModule(require('./bw.jpg'));
await image.downloadAsync();
this.setState({
ready: true,
image,
});
})();
}
render() {
return (
<View style={styles.container}>
<Image source={require('./back.jpg')} style={{ width: '100%' }} />
<GLView
style={{ width: '100%', height: '100%', position: 'absolute' }}
onContextCreate={this._onContextCreate}
/>
</View>
);
}
3. GL context:
_onContextCreate = gl => {
if (_initialized) { return }
function createShader(gl, type, source) {
var shader = gl.createShader(type);
gl.shaderSource(shader, source);
gl.compileShader(shader);
var success = gl.getShaderParameter(shader, gl.COMPILE_STATUS);
if (success) {
return shader;
}
//on error
console.log(gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
}
//get shaders
var vertexShader = createShader(gl, gl.VERTEX_SHADER, vertexShaderSource);
var fragmentShader = createShader(gl, gl.FRAGMENT_SHADER, fragmentShaderSource);
function createProgram(gl, vertexShader, fragmentShader) {
var program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
//on error
var success = gl.getProgramParameter(program, gl.LINK_STATUS);
if (success) {
return program;
}
console.log(gl.getProgramInfoLog(program));
gl.deleteProgram(program);
}
//create program
var program = createProgram(gl, vertexShader, fragmentShader);
//get attributes
var positionAttributeLocation = gl.getAttribLocation(program, "a_position");
//a_position buffer for fragment shader
var positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// three 2d points
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
-1, -1,
-1, 1,
1, -1,
1, 1,
1, -1,
-1, 1,
]), gl.STATIC_DRAW);
//create the viewport
gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);
// Clear the canvas
gl.clearColor(0.0, 0.0, 0.0, 0.0);
gl.clear(gl.COLOR_BUFFER_BIT);
// use program
gl.useProgram(program);
gl.enableVertexAttribArray(positionAttributeLocation);
gl.vertexAttribPointer(
positionAttributeLocation, 2, gl.FLOAT, false, 0, 0)
gl.drawArrays(gl.TRIANGLES, 0, 6);
4. Code for the texture, and end of _onContextCreate:
//get image from state
const image = this.state.image
var texCoordLocation = gl.getAttribLocation(program, "a_texCoord");
var uSampler = gl.getUniformLocation(program, 'u_image');
// provide texture coordinates for the rectangle.
var texCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
var positions = [
-1,-1,
-1, 1,
1,-1,
1, 1,
1,-1,
-1, 1,
];
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
gl.enableVertexAttribArray(texCoordLocation);
gl.vertexAttribPointer(texCoordLocation, 2, gl.FLOAT, false, 0, 0);
//create texture
var texture = gl.createTexture();
//check if image is ready
if (image.downloaded) loadTexture(texture, image)
//get image width & height for pow2 check.
const { width, height } = Image.resolveAssetSource(image);
function loadTexture(texture, img) {
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.uniform1i(uSampler, 0);
//pow2 check
if (isPowerOf2(width) && isPowerOf2(height)) {
gl.generateMipmap(gl.TEXTURE_2D);
} else {
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
}
return texture
}
function isPowerOf2(value) {
return (value & (value - 1)) == 0;
}
gl.flush();
gl.endFrameEXP();
_initialized = true;
};
Thanks in advance!

Keep objects looking at camera

Guys, I know this question has been asked several times, in several different ways, but I just can't get it to work. Basically I have 2D clouds, but I want the camera to rotate around an object floating above the clouds. The problem is, when I'm not looking at the face of the clouds you can tell that they are 2D. So I want the clouds to "look" at the camera wherever it is. I believe my problem stems from how the cloud geometry is applied to the planes, but take a look. I put a lookAt function within my animate function. I hope you can point me in the right direction at least.
Three.js rev. 70...
container.appendChild(renderer.domElement);
camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 0, 100);
scene.add(camera);
controls = new THREE.OrbitControls( camera );
controls.target.copy( new THREE.Vector3( 0, 0,475) );
controls.minDistance = 50;
controls.maxDistance = 200;
controls.autoRotate = true;
controls.autoRotateSpeed = .2; // 30 seconds per round when fps is 60
controls.minPolarAngle = Math.PI/4; // radians
controls.maxPolarAngle = Math.PI/2; // radians
controls.enableDamping = true;
controls.dampingFactor = 0.25;
clock = new THREE.Clock();
cloudGeometry = new THREE.Geometry();
var texture = THREE.ImageUtils.loadTexture('img/cloud10.png', null, animate);
texture.magFilter = THREE.LinearMipMapLinearFilter;
texture.minFilter = THREE.LinearMipMapLinearFilter;
var fog = new THREE.Fog(0x4584b4, -100, 3000);
cloudMaterial = new THREE.ShaderMaterial({
uniforms: {
"map": {
type: "t",
value: texture
},
"fogColor": {
type: "c",
value: fog.color
},
"fogNear": {
type: "f",
value: fog.near
},
"fogFar": {
type: "f",
value: fog.far
},
},
vertexShader: document.getElementById('vs').textContent,
fragmentShader: document.getElementById('fs').textContent,
depthWrite: false,
depthTest: false,
transparent: true
});
var plane = new THREE.Mesh(new THREE.PlaneGeometry(64, 64));
for (var i = 0; i < 8000; i++) {
plane.position.x = Math.random() * 1000 - 500;
plane.position.y = -Math.random() * Math.random() * 200 - 15;
plane.position.z = i;
plane.rotation.z = Math.random() * Math.PI;
plane.scale.x = plane.scale.y = Math.random() * Math.random() * 1.5 + 0.5;
plane.updateMatrix();
cloudGeometry.merge(plane.geometry, plane.matrix);
}
cloud = new THREE.Mesh(cloudGeometry, cloudMaterial);
scene.add(cloud);
cloud = new THREE.Mesh(cloudGeometry, cloudMaterial);
cloud.position.z = -8000;
scene.add(cloud);
var radius = 100;
var xSegments = 50;
var ySegments = 50;
var geo = new THREE.SphereGeometry(radius, xSegments, ySegments);
var mat = new THREE.ShaderMaterial({
uniforms: {
lightPosition: {
type: 'v3',
value: light.position
},
textureMap: {
type: 't',
value: THREE.ImageUtils.loadTexture("img/maps/moon.jpg")
},
normalMap: {
type: 't',
value: THREE.ImageUtils.loadTexture("img/maps/normal.jpg")
},
uvScale: {
type: 'v2',
value: new THREE.Vector2(1.0, 1.0)
}
},
vertexShader: document.getElementById('vertexShader').textContent,
fragmentShader: document.getElementById('fragmentShader').textContent
});
mesh = new THREE.Mesh(geo, mat);
mesh.geometry.computeTangents();
mesh.position.set(0, 50, 0);
mesh.rotation.set(0, 180, 0);
scene.add(mesh);
}
function onWindowResize() {
renderer.setSize(window.innerWidth, window.innerHeight);
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
}
function animate() {
requestAnimationFrame(animate);
light.orbit(mesh.position, clock.getElapsedTime());
cloud.lookAt( camera );
controls.update(camera);
renderer.render(scene, camera);
}
animate();
window.addEventListener('resize', onWindowResize, false);
Just a first guess: the lookAt function needs a Vector3 as its parameter. Try using camera.position in the animate function:
cloud.lookAt( camera.position );
First of all, to build 2D objects in a scene that always face towards the camera, you should use the Sprite object; then you don't have to do anything to get this effect (and you get better performance :)).
Definition from THREE.org: Sprite - a sprite is a plane in a 3D scene which always faces towards the camera.
var map = THREE.ImageUtils.loadTexture( "sprite.png" );
var material = new THREE.SpriteMaterial( { map: map, color: 0xffffff, fog: true } );
var sprite = new THREE.Sprite( material );
scene.add( sprite );
Please check this example: http://threejs.org/examples/#webgl_points_sprites
I would absolutely agree and would use Sprite, or even Points, but if you assign a texture to it, it renders it square-sized. My sprites are animated, and the frames cannot be packed in square tiles, because that would take a lot of space. I might make a mesh and use this lookAt function.

three js vertices does not update

I'm using three.js r67, and the vertices do not seem to be updated.
I set geometry.dynamic = true and geometry.verticesNeedUpdate = true.
The circle is moving, but the line is static...
Could someone help me?
var scene = new THREE.Scene();
var renderer = new THREE.WebGLRenderer();
var g = new THREE.CircleGeometry( 4, 16 );
var m = new THREE.MeshBasicMaterial({color: 0x114949});
var circle = new THREE.Mesh( g, m );
circle.position.x = 2;
circle.position.y = 2;
circle.position.z = -1;
scene.add( circle );
var material = new THREE.LineBasicMaterial({color: 0xDF4949, linewidth: 5});
var geometry = new THREE.Geometry();
geometry.vertices.push(new THREE.Vector3(0, 0, 0));
geometry.vertices.push(new THREE.Vector3(1, 1, 0));
geometry.verticesNeedUpdate = true;
geometry.dynamic = true;
var line = new THREE.Line(geometry, material);
scene.add(line);
var update = function() {
circle.position.x += 0.01;
line.geometry.vertices[0].x = circle.position.x;
};
var render = function() {
renderer.render(scene, camera);
};
var loop = function() {
update();
render();
requestAnimationFrame(loop, renderer.canvas);
};
loop();
Note: The legacy Geometry class has been replaced with the BufferGeometry class.
If you want to update the vertex positions of your BufferGeometry, you need to use a pattern like so:
mesh.geometry.attributes.position.array[ 0 ] += 0.01;
mesh.geometry.attributes.position.needsUpdate = true;
You need to set the needsUpdate flag to true after every change to the attribute values; setting it once is not enough.
three.js r.147
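For completeness, here is a minimal sketch of the same moving-line idea written against the BufferGeometry API (it assumes a recent three.js build is already loaded as THREE; the sizes and colors are only illustrative):
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 0.1, 100);
camera.position.z = 10;
const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);

// The circle that will drag the line's first vertex along with it.
const circle = new THREE.Mesh(
    new THREE.CircleGeometry(0.4, 16),
    new THREE.MeshBasicMaterial({ color: 0x114949 })
);
scene.add(circle);

// Two vertices stored in a position BufferAttribute: (0, 0, 0) and (1, 1, 0).
const geometry = new THREE.BufferGeometry();
geometry.setAttribute('position', new THREE.Float32BufferAttribute([0, 0, 0, 1, 1, 0], 3));
const line = new THREE.Line(geometry, new THREE.LineBasicMaterial({ color: 0xdf4949 }));
scene.add(line);

function loop() {
    circle.position.x += 0.01;
    const position = line.geometry.attributes.position;
    position.setX(0, circle.position.x); // move vertex 0 with the circle
    position.needsUpdate = true;         // re-upload the attribute on the next render
    renderer.render(scene, camera);
    requestAnimationFrame(loop);
}
loop();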

kineticjs, should stage.setWidth(), stage.setHeight() and stage.draw(); resize everything?

I'm trying to resize everything on window resize.
I have this function, but it only seems to resize one layer (graphLayer.add(drawGraph);).
I thought the code below should resize everything?
function onResize(){
var widowWidth = (window.innerWidth) -yPadding; // width - the padding
var widowHeight = (window.innerHeight) -xPadding; // Height - the padding
stage.setWidth((window.innerWidth) -yPadding);
stage.setHeight((window.innerHeight) -xPadding);
stage.draw();
}
Here are the basics of my code:
$(window).resize(onResize);
var graph;
var graphLayer = new Kinetic.Layer();
var BubbleLayer = new Kinetic.Layer();
var tooltipLayer = new Kinetic.Layer();
var widowWidth = (window.innerWidth) -yPadding; // width - the padding
var widowHeight = (window.innerHeight) -xPadding; // Height - the padding
var stage = new Kinetic.Stage({
container: 'graph',
width: widowWidth, // width - the padding
height: widowHeight, // Height - the padding
});
var tooltip = new Kinetic.Label({
opacity: 0.75,
visible: false,
listening: false
});
tooltip.add(new Kinetic.Tag({
.........
}));
tooltip.add(new Kinetic.Text({
.........
}));
var drawGraph = new Kinetic.Shape ({
sceneFunc: function(ctx){
.........
}
ctx.fillStrokeShape(this);
},
stroke: 'black',
strokeWidth: 1
});
var drawGraphquarter = new Kinetic.Shape ({
sceneFunc: function(ctx){
.........
}
},
stroke: 'red',
strokeWidth: 3
});
// build data
$.getJSON( "bubble_data.json", function( data ) {
$.each( data.projects, function(i) {
var bubbleData = [];
.........
bubbleData.push({
.........
});
.........
for(var n = 0; n < bubbleData.length; n++) {
addBubble(bubbleData[n], BubbleLayer);
stage.add(BubbleLayer);
}
});
graphLayer.add(drawGraph);
graphLayer.add(drawGraphquarter);
stage.add(BubbleLayer);
stage.add(graphLayer);
graphLayer.moveToBottom();
tooltipLayer.add(tooltip);
stage.add(tooltipLayer);
});
});
// add bubles to layer
function addBubble(obj, BubbleLayer) {
var bubble = new Kinetic.Shape({
sceneFunc: function(ctx) {
.........
});
BubbleLayer.add(bubble);
}
// calendar quarter
function getQuarter(d) {
.........
}
function onResize(){
var widowWidth = (window.innerWidth) -yPadding; // width - the padding
var widowHeight = (window.innerHeight) -xPadding; // Height - the padding
stage.setWidth((window.innerWidth) -yPadding);
stage.setHeight((window.innerHeight) -xPadding);
stage.draw();
}
I updated my code to this:
var graph;
var xPadding = 10;
var yPadding = 10;
var graphLayer = new Kinetic.Layer();
var graphLayerQuater = new Kinetic.Layer();
var BubbleLayer = new Kinetic.Layer();
var tooltipLayer = new Kinetic.Layer();
var widowWidth = (window.innerWidth) -yPadding; // width - the padding
var widowHeight = (window.innerHeight) -xPadding; // Height - the padding
var stage = new Kinetic.Stage({
container: 'graph',
width: widowWidth,
height: widowHeight,
});
var initialScale = stage.scale(); //returns {x: 1, y: 1}
var initialWidth = (window.innerWidth) -yPadding; // width - the padding
var initialHeight = (window.innerHeight) -xPadding; // Height - the padding
The onresize function:
window.onresize = function(event) {
var width = (window.innerWidth) -yPadding;
var height =(window.innerHeight) -xPadding;
var xScale = (width / initialWidth) * initialScale.x;
var yScale = (height / initialHeight) * initialScale.y;
var newScale = {x: xScale, y: yScale};
stage.setAttr('width', width);
stage.setAttr('height', height);
stage.setAttr('scale', newScale );
stage.draw();
}