I'm using a perspective camera and want distant objects to appear the same visual size as close objects.
I compute the distance to the close and the distant object:
function getDistance(mesh1, mesh2) {
    var dx = mesh1.position.x - mesh2.position.x;
    var dy = mesh1.position.y - mesh2.position.y;
    var dz = mesh1.position.z - mesh2.position.z;
    return Math.sqrt(dx * dx + dy * dy + dz * dz);
}
and recompute this every frame:
var diff = (getDistance(camera, mesh5) - getDistance(camera, mesh1)) / (getDistance(camera, mesh5) + getDistance(camera, mesh1)) * 100;
console.log(diff);
mesh5.scale.set(diff, diff, 1);
But the sizes still do not match.
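For reference, with a perspective camera the projected size of an object falls off roughly in inverse proportion to its distance, so keeping the on-screen size constant means scaling the mesh proportionally to its distance from the camera rather than by the normalized difference above. Here is a minimal Python sketch of the math; the reference distance is a hypothetical tuning value:

import math

def distance(a, b):
    # Euclidean distance between two 3D positions, as in getDistance above.
    return math.sqrt(sum((pa - pb) ** 2 for pa, pb in zip(a, b)))

def constant_screen_size_scale(camera_pos, mesh_pos, reference_distance):
    # Under perspective projection, apparent size ~ real size / distance,
    # so scale linearly with distance to hold the on-screen size constant.
    # reference_distance is the distance at which the scale should be 1.
    return distance(camera_pos, mesh_pos) / reference_distance

In three.js terms this would be something like mesh5.scale.setScalar(camera.position.distanceTo(mesh5.position) / referenceDistance), recomputed every frame.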
I am making a space game in Godot. Whenever my ship is a large distance away from (0, 0, 0), it shakes violently every time I move the camera or the ship. Here is my code for moving the ship:
extends KinematicBody
export var default_speed = 500000
export var max_speed = 5000
export var acceleration = 100
export var pitch_speed = 1.5
export var roll_speed = 1.9
export var yaw_speed = 1.25
export var input_response = 8.0
var velocity = Vector3.ZERO
var forward_speed = 0
var vert_speed = 0
var pitch_input = 0
var roll_input = 0
var yaw_input = 0
var alt_input = 0
var system = "System1"
func _ready():
    look_at(get_parent().get_node("Star").translation, Vector3.UP)

func get_input(delta):
    if Input.is_action_pressed("boost"):
        max_speed = 299792458
        acceleration = 100
    else:
        max_speed = default_speed
        acceleration = 100
    if Input.is_action_pressed("throttle_up"):
        forward_speed = lerp(forward_speed, max_speed, acceleration * delta)
    if Input.is_action_pressed("throttle_down"):
        forward_speed = lerp(forward_speed, 0, acceleration * delta)
    pitch_input = lerp(pitch_input, Input.get_action_strength("pitch_up") - Input.get_action_strength("pitch_down"), input_response * delta)
    roll_input = lerp(roll_input, Input.get_action_strength("roll_left") - Input.get_action_strength("roll_right"), input_response * delta)
    yaw_input = lerp(yaw_input, Input.get_action_strength("yaw_left") - Input.get_action_strength("yaw_right"), input_response * delta)

func _physics_process(delta):
    get_input(delta)
    transform.basis = transform.basis.rotated(transform.basis.z, roll_input * roll_speed * delta)
    transform.basis = transform.basis.rotated(transform.basis.x, pitch_input * pitch_speed * delta)
    transform.basis = transform.basis.rotated(transform.basis.y, yaw_input * yaw_speed * delta)
    transform.basis = transform.basis.orthonormalized()
    velocity = -transform.basis.z * forward_speed * delta
    move_and_collide(velocity * delta)

func _on_System1_area_entered(area):
    print(area, area.name)
    system = "E"

func _on_System2_area_entered(area):
    print(area, area.name)
    system = "System1"
Is there any way to prevent this from happening?
First of all, I want to point out that this problem is not unique to Godot, although some other engines have automatic fixes for it.
It happens because the precision of floating-point numbers decreases as you move away from the origin. In other words, the gap between one representable floating-point number and the next becomes wider.
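A quick Python sketch (using NumPy's float32, on the assumption that it behaves like the engine's single-precision floats) makes the widening gaps visible:

import numpy as np

# Print the gap between a float32 value and the next representable float32
# at increasing distances from the origin.
for magnitude in [1.0, 1_000.0, 1_000_000.0, 100_000_000.0]:
    gap = np.spacing(np.float32(magnitude))
    print(f"at {magnitude:>13,.0f}: next representable value is {gap} away")

At 100,000,000 units from the origin, a position can only change in steps of about 8 units; snapping of that size is exactly what shows up as violent shaking.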
The issue is covered in more detail on the game development sister site:
Why loss of floating point precision makes rendered objects vibrate?
Why does the resolution of floating point numbers decrease further from an origin?
What's the largest "relative" level I can make using float?
Why would a bigger game move the gameworld around the Player instead of just moving a player within a gameworld?
Moving player inside of moving spaceship?
Spatial Jitter problem in large unity project
Godot uses single precision. Support for double precision has been added in Godot 4, but that only reduces the problem; it does not eliminate it.
The general solution is to warp everything in such a way that the player is near the origin again. So, let us do that.
We will need a reference to the node we want to keep near the origin. I'm going to assume that which node this is does not change during gameplay.
export var target_node_path:NodePath
onready var _target_node:Spatial = get_node(target_node_path)
And we will need a reference to the world we need to move. I'm also assuming it does not change, and that the node we want to keep near the origin is a child of it, directly or indirectly.
export var world_node_path:NodePath
onready var _world_node:Node = get_node(world_node_path)
And we need a maximum distance at which we perform the shift:
export var max_distance_from_origin:float = 10000.0
We will not move the world itself, but its children.
func _process(_delta: float) -> void:
    var target_origin := _target_node.global_transform.origin
    if target_origin.length() < max_distance_from_origin:
        return

    for child in _world_node.get_children():
        var spatial := child as Spatial
        if spatial != null:
            spatial.global_translate(-target_origin)
Now, something I have not seen discussed is what happens with physics objects. The concern is that the physics server might try to move them back to their old position (in practice this is mainly a problem with RigidBody), overwriting what we did.
So, if that is a problem, we can handle physics objects with a teleport. For example:
func _process(_delta: float) -> void:
    var target_origin := _target_node.global_transform.origin
    if target_origin.length() < max_distance_from_origin:
        return

    for child in _world_node.get_children():
        var spatial := child as Spatial
        if spatial != null:
            var new_transform := Transform(
                spatial.global_transform.basis,
                spatial.global_transform.origin - target_origin
            )
            spatial.global_transform = new_transform

            var physics_body := spatial as PhysicsBody # Check for RigidBody instead?
            if physics_body != null:
                PhysicsServer.body_set_state(
                    physics_body.get_rid(),
                    PhysicsServer.BODY_STATE_TRANSFORM,
                    new_transform
                )
But be aware that the above code does not consider any physics objects deeper in the scene tree.
I am using WebView2 with the Google Maps API scripts. With 96 DPI (100% display scale), the static map is inserted correctly into AutoCAD, and the latitude and longitude are calculated correctly from the screen point. If I increase the display scale above 100%, the latitude and longitude are calculated incorrectly from the screen point, and the static map is inserted slightly away from the right position in AutoCAD.
What would be the solution for higher DPI?
Here is the script I use:
function getMapType() {
    return map.getMapTypeId();
}
function getMapZoom() {
    return map.getZoom();
}
function getMapCenter() {
    var c = map.getCenter();
    return c.lat() + "|" + c.lng();
}
function getMapProjection() {
    projection = map.getProjection();
    topRight = projection.fromLatLngToPoint(map.getBounds().getNorthEast());
    bottomLeft = projection.fromLatLngToPoint(map.getBounds().getSouthWest());
    scale = 1 << map.getZoom();
}
function getLatLongByScreenPoint(x, y) {
    var c = projection.fromPointToLatLng(new google.maps.Point(x / scale + bottomLeft.x, y / scale + topRight.y));
    return c.lat() + "|" + c.lng();
}
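No DPI correction appears in the script. At a display scale above 100%, the host side often hands you coordinates in physical (device) pixels while the Maps API works in CSS pixels, which would produce exactly this kind of offset. Below is a minimal Python sketch of the math behind getLatLongByScreenPoint with a hypothetical device_pixel_ratio correction added; in JavaScript the same factor is available as window.devicePixelRatio, and on the .NET side the display scale can be derived from the monitor DPI divided by 96.

import math

def screen_point_to_lat_lng(x_px, y_px, zoom, top_right, bottom_left,
                            device_pixel_ratio=1.0):
    # top_right and bottom_left are (x, y) world coordinates of the visible
    # bounds, as returned by projection.fromLatLngToPoint() in the script.
    # Divide the incoming point by the display scale first: the projection
    # math expects CSS pixels, not physical pixels.
    x_css = x_px / device_pixel_ratio
    y_css = y_px / device_pixel_ratio
    scale = 1 << zoom
    world_x = x_css / scale + bottom_left[0]
    world_y = y_css / scale + top_right[1]
    # Inverse Web Mercator (256 x 256 world coordinate space at zoom 0).
    lng = (world_x / 256.0 - 0.5) * 360.0
    lat = math.degrees(2.0 * math.atan(
        math.exp((0.5 - world_y / 256.0) * 2.0 * math.pi)) - math.pi / 2.0)
    return lat, lng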
I have a rectangle with a sprite on it, and I have to detect whether the touch position lies within the rectangle.
This is my code:
if (Gdx.input.isTouched())
{
    int x1 = Gdx.input.getX();
    int y1 = Gdx.input.getY();
    Vector3 inputs = new Vector3(x1, y1, 0);
    gamecam.unproject(inputs);
    Gdx.app.log("x" + inputs.x, "y" + inputs.y);
    Gdx.app.log("rect" + rectangle.x, "rect" + rectangle.y);
    if (rectangle.contains(inputs.x, inputs.y))
    {
        Gdx.app.log("x" + inputs.x, "y" + inputs.y);
        Gdx.app.log("rect" + rectangle, "rect" + rectangle.y);
    }
}
Rectangle definition:
BodyDef bdef = new BodyDef();
bdef.type = BodyDef.BodyType.StaticBody;
b2body = screen.getWorld().createBody(bdef);
rectangle = new Rectangle();
rectangle.setHeight(55);
rectangle.setWidth(55);
PolygonShape head = new PolygonShape();
rectangle.setX(300);
rectangle.setY(10);
bdef.position.set((rectangle.getX() - rectangle.getWidth() / 2) / MyJungleGame.PPM, (rectangle.getY() - rectangle.getHeight() / 2) / MyJungleGame.PPM);
head.setAsBox(rectangle.getWidth() / 2 / MyJungleGame.PPM, rectangle.getHeight() / 2 / MyJungleGame.PPM);
FixtureDef fdef = new FixtureDef();
fdef.shape = head;
setPosition(b2body.getPosition().x - getWidth() / 2, b2body.getPosition().y - getHeight() / 2);
This is my output. The small rectangle at the bottom of the screen is the rectangle I created, but nothing happens when I click it. I checked the coordinates, and here is the log:
x-0.925: y-0.5625
rect300.0: rect10.0
x-0.925: y-0.5625
rect300.0: rect10.0
x-0.925: y-0.5625
I tried checking the touch using the method below:
if (inputs.x > sprite.getX() && inputs.x < sprite.getX() + sprite.getWidth())
{
    if (inputs.y > sprite.getY() && inputs.y < sprite.getY() + sprite.getHeight())
    {
        Gdx.app.log("sprite touched", "");
    }
}
This doesn't work either. Any idea where I made the mistake? Thanks in advance.
Since you are using Box2D, detecting collisions the usual way is more complicated for new users.
However, looking at your code, I would advise converting these coordinates using the PPM (pixels per meter) of your world:
int x1 = Gdx.input.getX();
int y1 = Gdx.input.getY();
Vector3 inputs = new Vector3(x1, y1, 0);
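To make the unit mismatch concrete: after gamecam.unproject the touch is in world units (meters, if the camera viewport is defined in meters via PPM), while the Rectangle in the question was positioned in pixels (300, 10), so contains() compares values in two different spaces. Here is a minimal Python sketch of the conversion, with a hypothetical PPM value standing in for MyJungleGame.PPM:

PPM = 100.0  # hypothetical pixels-per-meter constant

def touch_hits_rect(touch_x_m, touch_y_m, rect_x_px, rect_y_px, rect_w_px, rect_h_px):
    # Convert the pixel-space rectangle into the same (meter) units as the
    # unprojected touch before doing the containment test.
    x, y = rect_x_px / PPM, rect_y_px / PPM
    w, h = rect_w_px / PPM, rect_h_px / PPM
    return x <= touch_x_m <= x + w and y <= touch_y_m <= y + h

Either convert the rectangle into world units like this, or keep a separate pixel-space camera for UI hit tests; mixing the two spaces is why contains() never returns true in the question's log.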
Also, if you are going to build a collision system with Box2D, you should use this: http://www.aurelienribon.com/blog/2011/07/box2d-tutorial-collision-filtering/
I'm making a bidirectional path tracer and I'm having some trouble.
To be clear:
1) One point light
2) All objects are diffuse
3) All objects are spheres, even walls (they are very large)
4) NO MIS WEIGHTING
The light emission is a 3D vector. The BRDF of a sphere is a 3D vector. Both are hard-coded.
In the main function below I generate the eye path and the light path, then I connect them. At least I try to.
In this post I will talk about the main function, then generateEyePath, then generateLightPath. I will discuss the connecting function once the eye path and the light path are correct.
First questions:
Is the generation of the first light vertex correct?
Do I need to weight this vertex by the emission of the light source, or is it just the emission? The alternatives are commented on the lines where I fill the Vertices structure.
Do I need to translate fromLight in order to put it on the surface of the light sphere?
The code below is excerpted from the main function. Above it, two for loops iterate over all pixels. camera.o is the eye; cameraRayDir is the direction to the current pixel.
//The path light starting point is at the same position as the light
Ray fromLight(Vec(0, 24.3, 0), Vec());
Sphere light = spheres[7];

#define PDF 0.15915494309 // 1 / (2 * PI)

for (int i = 0; i < samps; ++i)
{
    std::vector<Vertices> PathEye;
    std::vector<Vertices> PathLight;

    Vec cameraRayDir = cx * (double(x) / w - .5) + cy * (double(y) / h - .5) + camera.d;
    Ray rayEye(camera.o, cameraRayDir.norm());

    // Hemisphere oriented towards the top
    fromLight.d = generateRayInHemisphere(fromLight.o, Vec(0, 1, 0)).d;
    double f = clamp(n.dot(fromLight.d.norm()));

    Vertices vert;
    vert.d = fromLight.d;
    vert.x = fromLight.o;
    vert.id = 7;
    vert.cos = f;
    vert.n = Vec(0, 1, 0).norm();
    // this one?
    //vert.couleur = spheres[7].e * f / PDF;
    // or this one?
    vert.couleur = spheres[7].e;
    PathLight.push_back(vert);

    int sizeEye = generateEyePath(PathEye, rayEye, maxDepth);
    int sizeLight = generateLightPath(PathLight, fromLight, maxDepth);

    for (int s = 0; s < sizeLight; ++s)
    {
        for (int t = 1; t < sizeEye; ++t)
        {
            int depth = t + s - 1;
            if ((s == 0 && t == 0) || depth < 0 || depth > maxDepth)
                continue;

            pixelValue = pixelValue + connectPaths(PathEye, PathLight, s, t);
        }
    }
}
For the eye path I intersect the geometry, then compute the illumination according to the distance to the light. The colour is black if the point is in shadow.
Second question: for the eye path and the direct illumination, is the computation correct? In many implementations I've seen, people use the pdf even for direct illumination, but I'm only using a point light and spheres.
int generateEyePath(std::vector<Vertices>& v, Ray eye, int maxDepth)
{
    double t;
    int id = 0;
    Vertices vert;
    int RussianRoulette;

    while (v.size() <= maxDepth)
    {
        if (distribRREye(generatorRREye) < 10)
            break;

        // Intersect all the geometry
        // id is the id of the intersected geometry in an array
        intersect(eye, t, id);
        const Sphere& obj = spheres[id];

        // Intersection point
        Vec x = eye.o + eye.d * t;
        // normal
        Vec n = (x - obj.p).norm();

        Vec direction = light.p - x;
        // Shadow ray
        Ray RaytoLight = Ray(x, direction.norm());
        const float distance = direction.length();
        // shadow
        const bool visibility = intersect(RaytoLight, t, id);
        const Sphere& lumiere = spheres[id];

        float degree = clamp(n.dot((lumiere.p - x).norm()));

        // If the intersected geometry is not a light, then in shadow
        if (lumiere.e.x == 0)
        {
            vert.couleur = Vec();
        }
        else // else we compute the colour
            // obj.c is the brdf, lumiere.e is the emission
            vert.couleur = (obj.c).mult(lumiere.e / (distance * distance)) * degree;

        vert.x = x;
        vert.id = id;
        vert.n = n;
        vert.d = eye.d.norm();
        vert.cos = degree;

        v.push_back(vert);

        eye = generateRayInHemisphere(x, n);
    }
    return v.size();
}
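On the second question: with a point light the shadow connection is deterministic, nothing is randomly sampled, so no pdf should appear in the direct-lighting term; only the BRDF, the inverse-square falloff, and the cosine do. Here is a minimal Python sketch of that estimator for a Lambertian surface; treating obj.c as the albedo rho is an assumption on my part, and if obj.c already contains the 1/pi you should drop it here:

import math

def direct_light_point(albedo, emission, cos_theta, dist, visible):
    # Deterministic next-event estimation toward a point light:
    # L = f_r * E * cos(theta) / d^2, with f_r = albedo / pi for a
    # Lambertian BRDF. No pdf term appears for a delta light.
    if not visible:
        return [0.0, 0.0, 0.0]
    return [(a / math.pi) * e * cos_theta / (dist * dist)
            for a, e in zip(albedo, emission)]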
For the light path, I compute each vertex from the previous one and the values at the current point, as in a standard path tracer.
Third question: is the colour computation correct?
int generateLightPath(std::vector<Vertices>& v, Ray fromLight, int maxDepth)
{
    double t;
    int id = 0;
    Vertices vert;
    Vec previous;

    while (v.size() <= maxDepth)
    {
        if (distribRRLight(generatorRRLight) < 10)
            break;

        previous = v.back().couleur;

        intersect(fromLight, t, id);
        // intersected geometry
        const Sphere& obj = spheres[id];

        // Intersection point
        Vec x = fromLight.o + fromLight.d * t;
        // normal
        Vec n = (x - obj.p).norm();

        double f = clamp(n.dot(fromLight.d.norm()));

        // obj.c is the brdf
        vert.couleur = previous.mult(((obj.c / M_PI) * f) / PDF);
        vert.x = x;
        vert.id = id;
        vert.n = n;
        vert.d = fromLight.d.norm();
        vert.cos = f;

        v.push_back(vert);

        fromLight = generateRayInHemisphere(x, n);
    }
    return v.size();
}
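On the third question: the vert.couleur update appears consistent with the stated setup (diffuse BRDF, uniform hemisphere sampling). With a diffuse BRDF rho/pi, the cosine term, and the uniform hemisphere pdf of 1/(2*pi), the per-bounce factor simplifies to (rho/pi) * cos(theta) / (1/(2*pi)) = 2 * rho * cos(theta). A minimal Python sketch of the same update:

import math

def light_path_throughput(previous, albedo, cos_theta):
    # (rho / pi) * cos_theta / (1 / (2 * pi)) == 2 * rho * cos_theta
    pdf = 1.0 / (2.0 * math.pi)
    return [p * (a / math.pi) * cos_theta / pdf
            for p, a in zip(previous, albedo)]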
For the moment I get this result (render screenshot not reproduced here).
The connecting function will come once the eye path and the light path are correct.
Thank you all.
Try the spherical reference scene mentioned in this paper. I think you can then work out most of your questions yourself, since it has an analytical solution.
https://www.researchgate.net/publication/221546261_Testing_Monte-Carlo_Global_Illumination_Methods_with_Analytically_Computable_Scenes
It would save you time to implement and verify your understanding of path tracing and light tracing first, then try to combine them with weights.
I am working with the Kinect for my research project. I have previously calculated joint angles and joint coordinates from the Kinect. I would now like to calculate the center of mass of the body being tracked.
Any ideas would be appreciated, and code snippets would be immensely helpful.
I owe a lot to Stack Overflow; without the community's help this would not have been possible.
Thanks in advance.
Here is the code where I want to include the center-of-mass function. This function retrieves the first tracked skeleton:
Skeleton GetFirstSkeleton(AllFramesReadyEventArgs e)
{
    using (SkeletonFrame skeletonFrameData = e.OpenSkeletonFrame())
    {
        if (skeletonFrameData == null)
        {
            return null;
        }

        skeletonFrameData.CopySkeletonDataTo(allSkeletons);

        // get the first tracked skeleton
        Skeleton first = (from s in allSkeletons
                          where s.TrackingState == SkeletonTrackingState.Tracked
                          select s).FirstOrDefault();
        return first;
    }
}
I have tried using the code below in my project, but it does not fit in properly; can anyone please help me integrate the center-of-mass code?
foreach (SkeletonData data in skeletonFrame.Skeletons)
{
    SkeletonFrame allskeleton = e.SkeletonFrame;

    // Count passive and active persons, up to six in the group
    int numberOfSkeletonsT = (from s in allskeleton.Skeletons
                              where s.TrackingState == SkeletonTrackingState.Tracked
                              select s).Count();
    int numberOfSkeletonsP = (from s in allskeleton.Skeletons
                              where s.TrackingState == SkeletonTrackingState.PositionOnly
                              select s).Count();
    int totalSkeletons = numberOfSkeletonsP + numberOfSkeletonsT;
    //Console.WriteLine("TotalSkeletons = " + totalSkeletons);

    if (data.TrackingState == SkeletonTrackingState.PositionOnly)
    {
        foreach (Joint joint in data.Joints)
        {
            if (joint.Position.Z != 0)
            {
                // "com" is the center-of-mass value I am trying to obtain
                double centerofmassX = com.Position.X;
                double centerofmassY = com.Position.Y;
                double centerofmassZ = com.Position.Z;
                Console.WriteLine(centerofmassX + centerofmassY + centerofmassZ);
            }
        }
    }
}
See a couple of resources here:
http://mathwiki.ucdavis.edu/Calculus/Vector_Calculus/Multiple_Integrals/Moments_and_Centers_of_Mass#Three-Dimensional_Solids
http://www.slideshare.net/GillianWinters/center-of-mass-presentation
http://en.wikipedia.org/wiki/Locating_the_center_of_mass
Basically, no matter what, you are going to need the mass of your user. This can be a simple input. Then you can determine how much weight the person puts on each foot and use the equations described at these sources. Another option may be to use plumb lines on a planar 2D representation of the user; however, that won't be the truly accurate 3D center of mass.
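Whichever source you follow, the core formula is the same: the center of mass is the mass-weighted average of the body-part positions. Here is a minimal Python sketch of that computation; the part names and mass fractions below are hypothetical placeholders, and published anthropometric tables give real per-segment percentages:

def center_of_mass(parts, mass_fractions):
    # parts: name -> (x, y, z) position; mass_fractions: name -> fraction of
    # total body mass, expected to sum to 1.0.
    return tuple(
        sum(mass_fractions[name] * pos[axis] for name, pos in parts.items())
        for axis in range(3)
    )

com = center_of_mass(
    {"head": (0.0, 1.7, 0.0), "torso": (0.0, 1.1, 0.1), "legs": (0.0, 0.5, 0.0)},
    {"head": 0.08, "torso": 0.50, "legs": 0.42},
)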
Here is an example of how to find how much mass is on each foot, using the equation found at http://www.vitutor.com/geometry/distance/line_plane.html:
Vector3 v = new Vector3(skeleton.Joints[JointType.Head].Position.X,
                        skeleton.Joints[JointType.Head].Position.Y,
                        skeleton.Joints[JointType.Head].Position.Z);
double mass; // the user's body mass, supplied as an input
double leftM, rightM;
double A = sFrame.FloorClipPlane.X,
       B = sFrame.FloorClipPlane.Y,
       C = sFrame.FloorClipPlane.Z;

// find the angle between the head vector and the floor plane, in degrees
double angle = Math.Asin(Math.Abs(A * v.X + B * v.Y + C * v.Z)
    / (Math.Sqrt(A * A + B * B + C * C) * Math.Sqrt(v.X * v.X + v.Y * v.Y + v.Z * v.Z)))
    * (180.0 / Math.PI);

if (angle == 90.0)
{
    leftM = mass / 2.0;
    rightM = mass / 2.0;
}

double distanceFrom90 = 90.0 - angle;
if (distanceFrom90 > 0)
{
    double leftMultiple = distanceFrom90 / 90.0;
    leftM = mass * leftMultiple;
    rightM = mass - leftM;
}
else
{
    double rightMultiple = distanceFrom90 / 90.0;
    rightM = rightMultiple * mass;
    leftM = mass - rightM;
}
This of course assumes that the user is standing on both feet, but you could modify the code to create a new plane based on the user's feet instead of the automatic one generated by the Kinect.
To then find the center of mass, you have to choose a datum. I would choose the head, as it is the top of the person and you can measure down from it easily, using the steps found here:
double distanceFromDatumLeft = Math.Sqrt(Math.Pow(headX - footLeftX, 2) + Math.Pow(headY - footLeftY, 2) + Math.Pow(headZ - footLeftZ, 2));
double distanceFromDatumRight = Math.Sqrt(Math.Pow(headX - footRightX, 2) + Math.Pow(headY - footRightY, 2) + Math.Pow(headZ - footRightZ, 2));

double momentLeft = distanceFromDatumLeft * leftM;
double momentRight = distanceFromDatumRight * rightM;
double momentSum = momentLeft + momentRight;

// measured in units from the datum
double centerOfGravity = momentSum / mass;
You can then of course show this on the screen by plotting a point that is centerOfGravity units below the head.
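A minimal Python sketch of that last step, assuming +Y is up in skeleton space:

def com_plot_point(head, center_of_gravity):
    # head: (x, y, z) position; returns the point center_of_gravity units
    # below the head along the up axis.
    x, y, z = head
    return (x, y - center_of_gravity, z)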