Dropping vertex on a vertex, creating an edge between them - jgraphx

I'm trying to figure out how to implement the above functionality, i.e. if I have a vertex already in my graph and I drag another vertex from my palette (I've derived my app from the Java BasicGraphEditor example), I'd like to automatically create an edge from the drop target to the dropped cell.
The current implementation creates a group, which I do NOT want.
Any suggestions?
Thanks!

OK, it seems the following is a solution to the above question.
Extend mxGraphComponent and override the following method:
public Object[] importCells(Object[] cells, double dx, double dy, Object target, Point location) {
    if (target == null && cells.length == 1 && location != null) {
        target = getCellAt(location.x, location.y);
        if (target instanceof mxICell && cells[0] instanceof mxICell) {
            mxICell targetCell = (mxICell) target;
            mxICell dropCell = (mxICell) cells[0];
            if (targetCell.isVertex() == dropCell.isVertex()) {
                // make target null, otherwise we create a group
                cells = super.importCells(cells, dx, dy, null, location);
                Object parent = graph.getModel().getParent(target);
                // we cloned it, so update the reference
                dropCell = (mxICell) cells[0];
                graph.insertEdge(parent, null, "", target, dropCell);
                graph.setSelectionCell(dropCell);
                return null;
            }
        }
    }
    return super.importCells(cells, dx, dy, target, location);
}
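For reference, here is a hedged sketch of how such a subclass might be wired in, assuming your editor follows the stock GraphEditor example where the component is passed to BasicGraphEditor's constructor (the class name here is illustrative):
import com.mxgraph.swing.mxGraphComponent;
import com.mxgraph.view.mxGraph;

// Component that creates an edge when a vertex is dropped onto another vertex.
public class EdgeDropGraphComponent extends mxGraphComponent {
    public EdgeDropGraphComponent(mxGraph graph) {
        super(graph);
    }

    // ... place the importCells override shown above here ...
}

// In your BasicGraphEditor subclass, something like:
// super("My Editor", new EdgeDropGraphComponent(new mxGraph()));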

Related

How to change what block is at certain coordinates?

I am trying to recreate the mod from the YouTuber TommyInnit's video "Minecraft’s Laser Eye Mod Is Hilarious", as my friends and I wish to use it but couldn't find it on the internet. I have taken code from here for raycasting and also set up a keybind, but I cannot figure out how to set the block you are looking at. I have tried to get the block and set it, but I can only find how to make new blocks that don't yet exist. My code is the following, with the block code being on line 142:
package net.laser.eyes;
import org.lwjgl.glfw.GLFW;
import net.minecraft.client.options.KeyBinding;
import net.minecraft.client.util.InputUtil;
import net.minecraft.text.LiteralText;
import net.fabricmc.api.ModInitializer;
import net.fabricmc.fabric.api.client.keybinding.v1.KeyBindingHelper;
import net.fabricmc.fabric.api.event.client.ClientTickCallback;
import net.fabricmc.api.*;
import net.fabricmc.fabric.api.client.rendering.v1.*;
import net.minecraft.block.*;
import net.minecraft.client.*;
import net.minecraft.client.gui.*;
import net.minecraft.client.util.math.*;
import net.minecraft.entity.*;
import net.minecraft.entity.decoration.*;
import net.minecraft.entity.projectile.*;
import net.minecraft.text.*;
import net.minecraft.util.hit.*;
import net.minecraft.util.math.*;
import net.minecraft.world.*;
public class main implements ModInitializer {
@Override
public void onInitialize() {
KeyBinding binding1 = KeyBindingHelper.registerKeyBinding(new KeyBinding("key.laser-eyes.shoot", InputUtil.Type.KEYSYM, GLFW.GLFW_KEY_R, "key.category.laser.eyes"));
HudRenderCallback.EVENT.register(main::displayBoundingBox);
ClientTickCallback.EVENT.register(client -> {
while (binding1.wasPressed()) {
client.player.sendMessage(new LiteralText("Key 1 was pressed!"), false);
}
});
}
private static long lastCalculationTime = 0;
private static boolean lastCalculationExists = false;
private static int lastCalculationMinX = 0;
private static int lastCalculationMinY = 0;
private static int lastCalculationWidth = 0;
private static int lastCalculationHeight = 0;
private static void displayBoundingBox(MatrixStack matrixStack, float tickDelta) {
long currentTime = System.currentTimeMillis();
if(lastCalculationExists && currentTime - lastCalculationTime < 1000/45) {
drawHollowFill(matrixStack, lastCalculationMinX, lastCalculationMinY,
lastCalculationWidth, lastCalculationHeight, 2, 0xffff0000);
return;
}
lastCalculationTime = currentTime;
MinecraftClient client = MinecraftClient.getInstance();
int width = client.getWindow().getScaledWidth();
int height = client.getWindow().getScaledHeight();
Vec3d cameraDirection = client.cameraEntity.getRotationVec(tickDelta);
double fov = client.options.fov;
double angleSize = fov/height;
Vector3f verticalRotationAxis = new Vector3f(cameraDirection);
verticalRotationAxis.cross(Vector3f.POSITIVE_Y);
if(!verticalRotationAxis.normalize()) {
lastCalculationExists = false;
return;
}
Vector3f horizontalRotationAxis = new Vector3f(cameraDirection);
horizontalRotationAxis.cross(verticalRotationAxis);
horizontalRotationAxis.normalize();
verticalRotationAxis = new Vector3f(cameraDirection);
verticalRotationAxis.cross(horizontalRotationAxis);
HitResult hit = client.crosshairTarget;
if (hit.getType() == HitResult.Type.MISS) {
lastCalculationExists = false;
return;
}
int minX = width;
int maxX = 0;
int minY = height;
int maxY = 0;
for(int y = 0; y < height; y +=2) {
for(int x = 0; x < width; x+=2) {
if(minX < x && x < maxX && minY < y && y < maxY) {
continue;
}
Vec3d direction = map(
(float) angleSize,
cameraDirection,
horizontalRotationAxis,
verticalRotationAxis,
x,
y,
width,
height
);
HitResult nextHit = rayTraceInDirection(client, tickDelta, direction);//TODO make less expensive
if(nextHit == null) {
continue;
}
if(nextHit.getType() == HitResult.Type.MISS) {
continue;
}
if(nextHit.getType() != hit.getType()) {
continue;
}
if (nextHit.getType() == HitResult.Type.BLOCK) {
if(!((BlockHitResult) nextHit).getBlockPos().equals(((BlockHitResult) hit).getBlockPos())) {
continue;
}
} else if(nextHit.getType() == HitResult.Type.ENTITY) {
if(!((EntityHitResult) nextHit).getEntity().equals(((EntityHitResult) hit).getEntity())) {
continue;
}
}
if(minX > x) minX = x;
if(minY > y) minY = y;
if(maxX < x) maxX = x;
if(maxY < y) maxY = y;
}
}
lastCalculationExists = true;
lastCalculationMinX = minX;
lastCalculationMinY = minY;
lastCalculationWidth = maxX - minX;
lastCalculationHeight = maxY - minY;
drawHollowFill(matrixStack, minX, minY, maxX - minX, maxY - minY, 2, 0xffff0000);
LiteralText text = new LiteralText("Bounding " + minX + " " + minY + " " + width + " " + height + ": ");
client.player.sendMessage(text.append(getLabel(hit)), true);
//SET THE BLOCK (maybe use hit.getPos(); to find it??)
}
private static void drawHollowFill(MatrixStack matrixStack, int x, int y, int width, int height, int stroke, int color) {
matrixStack.push();
matrixStack.translate(x-stroke, y-stroke, 0);
width += stroke *2;
height += stroke *2;
DrawableHelper.fill(matrixStack, 0, 0, width, stroke, color);
DrawableHelper.fill(matrixStack, width - stroke, 0, width, height, color);
DrawableHelper.fill(matrixStack, 0, height - stroke, width, height, color);
DrawableHelper.fill(matrixStack, 0, 0, stroke, height, color);
matrixStack.pop();
}
private static Text getLabel(HitResult hit) {
if(hit == null) return new LiteralText("null");
switch (hit.getType()) {
case BLOCK:
return getLabelBlock((BlockHitResult) hit);
case ENTITY:
return getLabelEntity((EntityHitResult) hit);
case MISS:
default:
return new LiteralText("null");
}
}
private static Text getLabelEntity(EntityHitResult hit) {
return hit.getEntity().getDisplayName();
}
private static Text getLabelBlock(BlockHitResult hit) {
BlockPos blockPos = hit.getBlockPos();
BlockState blockState = MinecraftClient.getInstance().world.getBlockState(blockPos);
Block block = blockState.getBlock();
return block.getName();
}
private static Vec3d map(float anglePerPixel, Vec3d center, Vector3f horizontalRotationAxis,
Vector3f verticalRotationAxis, int x, int y, int width, int height) {
float horizontalRotation = (x - width/2f) * anglePerPixel;
float verticalRotation = (y - height/2f) * anglePerPixel;
final Vector3f temp2 = new Vector3f(center);
temp2.rotate(verticalRotationAxis.getDegreesQuaternion(verticalRotation));
temp2.rotate(horizontalRotationAxis.getDegreesQuaternion(horizontalRotation));
return new Vec3d(temp2);
}
private static HitResult rayTraceInDirection(MinecraftClient client, float tickDelta, Vec3d direction) {
Entity entity = client.getCameraEntity();
if (entity == null || client.world == null) {
return null;
}
double reachDistance = 5.0F;
HitResult target = rayTrace(entity, reachDistance, tickDelta, false, direction);
boolean tooFar = false;
double extendedReach = 6.0D;
reachDistance = extendedReach;
Vec3d cameraPos = entity.getCameraPosVec(tickDelta);
extendedReach = extendedReach * extendedReach;
if (target != null) {
extendedReach = target.getPos().squaredDistanceTo(cameraPos);
}
Vec3d vec3d3 = cameraPos.add(direction.multiply(reachDistance));
Box box = entity
.getBoundingBox()
.stretch(entity.getRotationVec(1.0F).multiply(reachDistance))
.expand(1.0D, 1.0D, 1.0D);
EntityHitResult entityHitResult = ProjectileUtil.raycast(
entity,
cameraPos,
vec3d3,
box,
(entityx) -> !entityx.isSpectator() && entityx.collides(),
extendedReach
);
if (entityHitResult == null) {
return target;
}
Entity entity2 = entityHitResult.getEntity();
Vec3d hitPos = entityHitResult.getPos();
if (cameraPos.squaredDistanceTo(hitPos) < extendedReach || target == null) {
target = entityHitResult;
if (entity2 instanceof LivingEntity || entity2 instanceof ItemFrameEntity) {
client.targetedEntity = entity2;
}
}
return target;
}
private static HitResult rayTrace(
Entity entity,
double maxDistance,
float tickDelta,
boolean includeFluids,
Vec3d direction
) {
Vec3d end = entity.getCameraPosVec(tickDelta).add(direction.multiply(maxDistance));
return entity.world.raycast(new RaycastContext(
entity.getCameraPosVec(tickDelta),
end,
RaycastContext.ShapeType.OUTLINE,
includeFluids ? RaycastContext.FluidHandling.ANY : RaycastContext.FluidHandling.NONE,
entity
));
}
}
Firstly, I heavily recommend following the standard Java naming conventions as it will make your code more understandable for others.
The technical name for a block present in the world at a specific position is a "Block State", represented by the BlockState class.
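For illustration, here is a minimal hedged sketch (Yarn mappings assumed) of reading and replacing the block state at a position; as explained below, the write only makes sense on the logical server:
import net.minecraft.block.BlockState;
import net.minecraft.block.Blocks;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.World;

public final class BlockStateExample {
    // Reads the block state currently at pos and replaces it with stone.
    public static BlockState replaceWithStone(World world, BlockPos pos) {
        BlockState previous = world.getBlockState(pos); // the "block state" present in the world
        world.setBlockState(pos, Blocks.STONE.getDefaultState()); // only valid on the logical server
        return previous;
    }
}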
You can only change the block state at a specific position on the server side. Your raycasting code is run on the client side, so you need to use the Fabric Networking API. You can see the server-side Javadoc here and the client-side Javadoc here.
Thankfully, Fabric Wiki has a networking tutorial so you don't have to read all that Javadoc. The part that you're interested in is "sending packets to the server and receiving packets on the server".
Here's a guide specific for your use case:
Introduction to Networking
Minecraft operates as two different components: the client and the server.
The client is responsible for jobs such as rendering and GUIs, while the server is responsible for handling world storage, entity AI, etc. (we are talking about the logical client and server here).
The physical server and physical client are the actual JAR files that are run.
The physical (dedicated) server contains only the logical server, while a physical client contains both a logical (integrated) server and a logical client.
A diagram that explains it can be found here.
Because the logical client cannot change the state of the logical server (e.g. block states in a world), packets have to be sent from the client to the server in order for the server to respond.
The following code is only example code, and you shouldn't copy it! You should think about safety precautions like preventing cheat clients from changing every block. Probably one of the most important rules in networking: assume the client is lying.
The Fabric Networking API
Your starting points are ServerPlayNetworking and ClientPlayNetworking. They are the classes that help you send and receive packets.
Register a listener using registerGlobalReceiver, and send a packet by using send.
You first need an Identifier in order to separate your packet from other packets and make sure it is interpreted correctly. It is recommended to put an Identifier like this in a static field in your ModInitializer or a utility class.
public class MyMod implements ModInitializer {
public static final Identifier SET_BLOCK_PACKET = new Identifier("modid", "setblock");
}
(Don't forget to replace modid with your mod ID)
You usually want to pass data with your packets (e.g. block position and block to change to), and you can do so with a PacketByteBuf.
Let's Piece This all Together
So, we have an Identifier. Let's send a packet!
Client-Side
We will start by creating a PacketByteBuf and writing the correct data:
private static void displayBoundingBox(MatrixStack matrixStack, float tickDelta) {
    // ...
    PacketByteBuf buf = PacketByteBufs.create();
    // hit.getPos() is a Vec3d; for a block hit we want the BlockPos instead
    buf.writeBlockPos(((BlockHitResult) hit).getBlockPos());
    // Minecraft doesn't have a way to write a Block to a packet, so we will write the registry name instead
    buf.writeIdentifier(new Identifier("minecraft", "someblock" /*for example, "stone"*/));
}
And now let's send the packet:
// ...
ClientPlayNetworking.send(SET_BLOCK_PACKET, buf);
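Put together, the client-side part could be wrapped in a small helper like this (a sketch; sendSetBlockPacket is an illustrative name, and SET_BLOCK_PACKET is the Identifier from the MyMod snippet above):
import net.fabricmc.fabric.api.client.networking.v1.ClientPlayNetworking;
import net.fabricmc.fabric.api.networking.v1.PacketByteBufs;
import net.minecraft.network.PacketByteBuf;
import net.minecraft.util.Identifier;
import net.minecraft.util.math.BlockPos;

public final class SetBlockSender {
    // Sends the target position and the registry name of the desired block to the server.
    public static void sendSetBlockPacket(BlockPos pos, Identifier blockId) {
        PacketByteBuf buf = PacketByteBufs.create();
        buf.writeBlockPos(pos);
        buf.writeIdentifier(blockId);
        ClientPlayNetworking.send(MyMod.SET_BLOCK_PACKET, buf);
    }
}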
Server-Side
A packet with the SET_BLOCK_PACKET ID has been sent, but we also need to listen for it and receive it on the server side. We can do that by using ServerPlayNetworking.registerGlobalReceiver:
@Override
public void onInitialize() {
// ...
// This code MUST be in onInitialize
ServerPlayNetworking.registerGlobalReceiver(SET_BLOCK_PACKET, (server, player, handler, buf, sender) -> {
});
}
We are using a lambda expression here. For more info about lambdas, Google is your friend.
When receiving a packet, code inside your lambda will be executed on the network thread. This code is not allowed to modify anything related to in-game logic (i.e. the world). For that, we will use server.execute(Runnable).
You should read the buf on the network thread, though.
ServerPlayNetworking.registerGlobalReceiver(SET_BLOCK_PACKET, (server, player, handler, buf, sender) -> {
BlockPos pos = buf.readBlockPos(); // reads must be done in the same order
Block blockToSet = Registry.BLOCK.get(buf.readIdentifier()); // reading using the identifier
server.execute(() -> { // We are now on the main thread
// In a normal mod, checks will be done here to prevent the client from setting blocks outside the world etc. but this is only example code
player.getServerWorld().setBlockState(pos, blockToSet.getDefaultState()); // changing the block state
});
});
Once again, you should prevent the client from sending invalid locations.
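For example, a hedged sketch of the kind of checks you might add inside server.execute() before calling setBlockState (the limits are arbitrary illustrative values):
server.execute(() -> {
    // Illustrative sanity checks; tune the limits for your mod.
    boolean tooFarAway = !pos.isWithinDistance(player.getBlockPos(), 8.0); // arbitrary 8-block reach limit
    boolean outsideWorldHeight = pos.getY() < 0 || pos.getY() > 255;       // assumes the classic 0..255 height range
    if (tooFarAway || outsideWorldHeight) {
        return; // ignore suspicious requests instead of trusting the client
    }
    player.getServerWorld().setBlockState(pos, blockToSet.getDefaultState());
});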

XNA 4.0 Camera and object handling on screen

For a side-scrolling 2D platform game I want to implement a moving camera class. The reason for using a camera class instead of moving the whole map is that moving too many objects at once would cause lag, and I cannot let that happen.
There's a nice algorithm for handling the camera: when the player moves further than the width of the screen, the camera moves in the player's direction until he is once again in the middle of the screen. I've been working for several days on making this algorithm work, but with no success.
// Main
public class Camera
{
protected float _zoom;
protected Matrix _transform;
protected Matrix _inverseTransform;
//The zoom scalar (1.0f = 100% zoom level)
public float Zoom
{
get { return _zoom; }
set { _zoom = value; }
}
// Camera View Matrix Property
public Matrix Transform
{
get { return _transform; }
set { _transform = value; }
}
// Inverse of the view matrix,
// can be used to get
// objects screen coordinates
// from its object coordinates
public Matrix InverseTransform
{
get { return _inverseTransform; }
}
public Vector2 Pos;
// Constructor
public Camera()
{
_zoom = 2.4f;
Pos = new Vector2(0, 0);
}
// Update
public void Update(GameTime gameTime)
{
//Clamp zoom value
_zoom = MathHelper.Clamp(_zoom, 0.0f, 10.0f);
//Create view matrix
_transform = Matrix.CreateScale(new Vector3(_zoom, _zoom, 1)) *
Matrix.CreateTranslation(Pos.X, Pos.Y, 0);
//Update inverse matrix
_inverseTransform = Matrix.Invert(_transform);
}
}
This is the camera class I made for handling the screen. Its main purpose is to resize the screen, more precisely to zoom in and out whenever I want to change screens (title screen, playing screen, game over, and so on).
Moving the camera is quite simple with keys, like this.
if (keyState.IsKeyDown(Keys.D))
Cam.Pos.X -= 20;
if (keyState.IsKeyDown(Keys.A))
Cam.Pos.X += 20;
if (keyState.IsKeyDown(Keys.S))
Cam.Pos.Y -= 20;
if (keyState.IsKeyDown(Keys.W))
Cam.Pos.Y += 20;
And of course the drawing method which applies the camera.
spriteBatch.Begin(SpriteSortMode.Texture, BlendState.AlphaBlend, null, null, null, null, Cam.Transform);
Here comes the part where I'm stuck. What I want to do is make something like two 2D "rooms". By room I mean the space where I usually place objects, like "Vector2(74, 63)". So I want to create a place where I could draw items that stick to the screen and don't move, and define screen bounds that make my algorithm work: they would always be on screen and, in addition, would check whether one of the borders of the screen "room" reaches certain coordinates of the map "room".
I think the reason for that is obvious: I don't want the player to move the camera outside the map when he reaches the wall, otherwise the player would already see part of the next map he will be transferred to.
The reason for drawing both maps next to each other is again to reduce loading time, so the player wouldn't have to wait to play the next map.
Alright, so I've run into more trouble than I expected, so I'll add extra information, starting with the player class:
// Main
public class Player
{
public Texture2D AureliusTexture;
public Vector2 position;
public Vector2 velocity;
public Vector2 PosForTheCam; // Variable that holds value for moving the camera
protected Vector2 dimensions;
protected CollisionPath attachedPath;
const float GRAVITY = 18.0f;
const float WALK_VELOCITY = 120f;
const float JUMP_VELOCITY = -425.0f;
// Constructor
public Player()
{
dimensions = new Vector2(23, 46);
position = new Vector2(50, 770);
}
public void Update(float deltaSeconds, List<CollisionPath> collisionPaths)
{
#region Input handling
KeyboardState keyState = Keyboard.GetState();
if (keyState.IsKeyDown(Keys.Left))
{
velocity.X = -WALK_VELOCITY;
}
else if (keyState.IsKeyDown(Keys.Right))
{
velocity.X = WALK_VELOCITY;
}
else
{
velocity.X = 0;
}
if (attachedPath != null && keyState.IsKeyDown(Keys.Space))
{
velocity.Y = JUMP_VELOCITY;
attachedPath = null;
}
velocity.Y += GRAVITY;
#endregion
#region Region of handling the camera based on Player
PosForTheCam.X = velocity.X;
#endregion
#region Collision checking
if (velocity.Y >= 0)
{
if (attachedPath != null)
{
position.X += velocity.X * deltaSeconds;
position.Y = attachedPath.InterpolateY(position.X) - dimensions.Y / 2;
velocity.Y = 0;
if (position.X < attachedPath.MinimumX || position.X > attachedPath.MaximumX)
{
attachedPath = null;
}
}
else
{
Vector2 footPosition = position + new Vector2(0, dimensions.Y / 2);
Vector2 expectedFootPosition = footPosition + velocity * deltaSeconds;
CollisionPath landablePath = null;
float landablePosition = float.MaxValue;
foreach (CollisionPath path in collisionPaths)
{
if (expectedFootPosition.X >= path.MinimumX && expectedFootPosition.X <= path.MaximumX)
{
float pathOldY = path.InterpolateY(footPosition.X);
float pathNewY = path.InterpolateY(expectedFootPosition.X);
if (footPosition.Y <= pathOldY && expectedFootPosition.Y >= pathNewY && pathNewY < landablePosition)
{
landablePath = path;
landablePosition = pathNewY;
}
}
}
if (landablePath != null)
{
velocity.Y = 0;
footPosition.Y = landablePosition;
attachedPath = landablePath;
position.X += velocity.X * deltaSeconds;
position.Y = footPosition.Y - dimensions.Y / 2;
}
else
{
position = position + velocity * deltaSeconds;
}
}
}
else
{
position += velocity * deltaSeconds;
attachedPath = null;
}
#endregion
}
}
To be clear, I asked my friend to do most of this because I wanted to handle the gravity and the slopes, so we made it work similarly to Unity; he happened to know how to do that.
And so I'll add the Update method that handles the camera from the Main Class.
MM.Update(gameTime); // Map Manager update function for map handling
Cam.Update(gameTime); // Camera update
Cam.Zoom = 2.4f; // Sets the zoom level for the title screen
// Takes the start position for camera in map and then turns off the update
// so the camera position can be changed. Else it would just keep an infinite
// loop and we couldn't change the camera.
if (StartInNewRoom)
{
Cam.Pos = MM.CameraPosition; // Applies the camera position value from the map manager class
StartInNewRoom = false;
}
I am unsure how to handle the camera; when I used your method, the result was often that the camera moved by itself or didn't move at all.
If you don't want objects to move with the camera, like a HUD, you need a second spriteBatch.Begin() without your camera matrix, which you draw after your actual scene.
To keep the camera from moving out of the map you could use some kind of collision detection: just calculate the right border of your camera. It depends on where the origin of your camera is.
Is your camera matrix working like this? The position should be negative or it will move in the wrong direction.
This is how mine looks:
return Matrix.CreateTranslation(new Vector3(-camera.position.X, -camera.position.Y, 0)) *
Matrix.CreateRotationZ(Rotation) * Matrix.CreateScale(Zoom) *
Matrix.CreateTranslation(new Vector3(Viewport.Width * 0.5f, Viewport.Height * 0.5f, 0));
Viewport.Width/Height * 0.5 centers your camera.
You can also apply this behind your Pos.X/Y.
To make the camera follow the player:
public void Update(Player player)
{
//Clamp zoom value
_zoom = MathHelper.Clamp(_zoom, 0.0f, 10.0f);
//Create view matrix
_transform = Matrix.CreateScale(new Vector3(_zoom, _zoom, 1)) *
Matrix.CreateTranslation(player.position.X, player.position.Y, 0); // Player exposes "position" in the class above
//Update inverse matrix
_inverseTransform = Matrix.Invert(_transform);
}

My current GPS Position under Nutiteq is not properly updated

As the basis for my GPS functionality I've taken the HelloMap3D example from Nutiteq (thanks Jaak) and adapted it to show my current GPS position slightly differently from that example: no growing yellow circles, but a fixed blue translucent circle with a center point as my current position. It works fine except for the update. It should erase the previous position when the location changes, so that the update happens as in the example, in the onLocationChanged method.
This is the code in my main activity:
protected void initGps(final MyLocationCircle locationCircle) {
final Projection proj = mapView.getLayers().getBaseLayer().getProjection();
locationListener = new LocationListener() {
@Override
public void onLocationChanged(Location location) {
locationCircle.setLocation(proj, location);
locationCircle.setVisible(true);
}
// Other methods...
}
}
I have adapted the MyLocationCircle class like this:
public void update() {
//Draw center with a drawable
Bitmap bitmapPosition = BitmapFactory.decodeResource(activity.getResources(), R.drawable.ic_home);
PointStyle pointStyle = PointStyle.builder().setBitmap(bitmapPosition).setColor(Color.BLUE).build();
// Create/update Point
if ( point == null ) {
point = new Point(circlePos, null, pointStyle, null);
layer.add(point);
} else { // We just have to change the Position to actual Position
point.setMapPos(circlePos);
}
point.setVisible(visible);
// Build closed circle
circleVerts.clear();
for (float tsj = 0; tsj <= 360; tsj += 360 / NR_OF_CIRCLE_VERTS) {
MapPos mapPos = new MapPos(circleScale * Math.cos(tsj * Const.DEG_TO_RAD) + circlePos.x, circleScale * Math.sin(tsj * Const.DEG_TO_RAD) + circlePos.y);
circleVerts.add(mapPos);
}
// Create/update line
if (circle == null) {
LineStyle lineStyle = LineStyle.builder().setWidth(0.05f).setColor(Color.BLUE).build();
PolygonStyle polygonStyle = PolygonStyle.builder().setColor(Color.BLUE & 0x80FFFFFF).setLineStyle(lineStyle).build();//0xE0FFFF
circle = new Polygon(circleVerts, null, polygonStyle, circle_data);
layer.add(circle);
} else {
circle.setVertexList(circleVerts);
}
circle.setVisible(visible);
}
public void setVisible(boolean visible) {
this.visible = visible;
}
public void setLocation(Projection proj, Location location) {
circlePos = proj.fromWgs84(location.getLongitude(), location.getLatitude());
projectionScale = (float) proj.getBounds().getWidth();
circleRadius = location.getAccuracy();
// Here is the most important modification
update();
}
So, each time our position changes, the onLocationChanged(Location location) method is called; it calls locationCircle.setLocation(proj, location), which in turn calls update().
The questions are: what am I doing wrong, and how can I solve it?
Thank you in advance.
You create and add a new circle with every update. You should reuse a single one and just update its vertices with setVertexList(). In particular, this line should be outside the onLocationChanged cycle, perhaps somewhere in initGps:
circle = new Polygon(circleVerts, null, polygonStyle, circle_data);
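A minimal sketch of that structure, reusing the names from your snippets (the Nutiteq calls are taken from your own code, so adapt as needed): build the Polygon a single time, then let update() only push new vertices into it. Note that circleVerts must already contain an initial ring when the polygon is first created.
// Build the polygon once (e.g. from initGps(), or guarded so it runs only on the first update):
private void createCircle() {
    LineStyle lineStyle = LineStyle.builder().setWidth(0.05f).setColor(Color.BLUE).build();
    PolygonStyle polygonStyle = PolygonStyle.builder().setColor(Color.BLUE & 0x80FFFFFF).setLineStyle(lineStyle).build();
    circle = new Polygon(circleVerts, null, polygonStyle, circle_data);
    layer.add(circle);
}

// Every later update only recomputes the vertices and hands them to the existing polygon:
public void update() {
    circleVerts.clear();
    for (float tsj = 0; tsj <= 360; tsj += 360 / NR_OF_CIRCLE_VERTS) {
        circleVerts.add(new MapPos(
            circleScale * Math.cos(tsj * Const.DEG_TO_RAD) + circlePos.x,
            circleScale * Math.sin(tsj * Const.DEG_TO_RAD) + circlePos.y));
    }
    circle.setVertexList(circleVerts);
    circle.setVisible(visible);
}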

How to find the Joint coordinates(X,Y,Z) ,also how to draw a locus of the tracked joint?

I am trying to develop logic to recognize a circle made by the user's right hand. I got the code to draw and track the skeleton from the sample code:
private void SensorSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
Skeleton[] skeletons = new Skeleton[0];
using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
{
if (skeletonFrame != null)
{
skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
skeletonFrame.CopySkeletonDataTo(skeletons);
}
}
using (DrawingContext dc = this.drawingGroup.Open())
{
// Draw a transparent background to set the render size
dc.DrawRectangle(Brushes.Black, null, new Rect(0.0, 0.0, RenderWidth, RenderHeight));
if (skeletons.Length != 0)
{
foreach (Skeleton skel in skeletons)
{
RenderClippedEdges(skel, dc);
if (skel.TrackingState == SkeletonTrackingState.Tracked)
{
this.DrawBonesAndJoints(skel, dc);
}
else if (skel.TrackingState == SkeletonTrackingState.PositionOnly)
{
dc.DrawEllipse(
this.centerPointBrush,
null,
this.SkeletonPointToScreen(skel.Position),
BodyCenterThickness,
BodyCenterThickness);
}
}
}
// prevent drawing outside of our render area
this.drawingGroup.ClipGeometry = new RectangleGeometry(new Rect(0.0, 0.0, RenderWidth, RenderHeight));
}
}
What I want to do now is track the coordinates of the user's right hand for gesture recognition.
Here is how I am planning to get the job done:
Start the gesture.
Draw the circle gesture, storing the coordinates at the start and then noting the coordinates at every 45-degree shift of the joint from the start; for 8 octants we will get 8 samples.
To decide that a circle was drawn, we can just check the relationship between the eight samples.
Also, in the depth image I want to show the locus of the drawn gesture, so as the hand point moves it leaves a trace behind, and at the end we get the figure the user drew. I have no idea how to achieve this.
Coordinates for each joint are available for each tracked skeleton during each SkeletonFrameReady event. Inside your foreach loop...
foreach (Skeleton skeleton in skeletons) {
// get the joint
Joint rightHand = skeleton.Joints[JointType.HandRight];
// get the individual points of the right hand
double rightX = rightHand.Position.X;
double rightY = rightHand.Position.Y;
double rightZ = rightHand.Position.Z;
}
You can look at the JointType enum to pull out any of the joints and work with the individual coordinates.
To draw your gesture trail you can use the DrawContext you have in your example or use another way to draw a Path onto the visual layer. With your x/y/z values, you would need to scale them to the window coordinates. The "Coding4Fun" library offers a pre-built function to do it; alternatively you can write your own, for example:
private static double ScaleY(Joint joint)
{
double y = ((SystemParameters.PrimaryScreenHeight / 0.4) * -joint.Position.Y) + (SystemParameters.PrimaryScreenHeight / 2);
return y;
}
private static void ScaleXY(Joint shoulderCenter, bool rightHand, Joint joint, out int scaledX, out int scaledY)
{
double screenWidth = SystemParameters.PrimaryScreenWidth;
double x = 0;
double y = ScaleY(joint);
// if rightHand then place shouldCenter on left of screen
// else place shouldCenter on right of screen
if (rightHand)
{
x = (joint.Position.X - shoulderCenter.Position.X) * screenWidth * 2;
}
else
{
x = screenWidth - ((shoulderCenter.Position.X - joint.Position.X) * (screenWidth * 2));
}
if (x < 0)
{
x = 0;
}
else if (x > screenWidth - 5)
{
x = screenWidth - 5;
}
if (y < 0)
{
y = 0;
}
scaledX = (int)x;
scaledY = (int)y;
}

How to scroll axis scale outside the graph of Zedgraph using the mouse event

Is it possible for the axis scale outside the graph to be scaled using a mouse event ("mouse down and hold") and moved up or down for the y-axis, and likewise left or right for the x-axis? For example, when I trigger MouseDownEvent and hold at the x-axis scale value 0.6 (or the space along that scale) and move it to the right, the scale should scroll depending on the chart fraction. Could you post an example? Thanks in advance!
Separately panning and zooming Y axes can be achieved using the mouse events of ZedGraph: the MouseDownEvent, MouseMoveEvent, MouseUpEvent and MouseWheel events (credits go to a colleague of mine).
It works with multiple GraphPanes and multiple Y axes.
The MouseMoveEvent is used to shift the Min and the Max of a Y axis when the mouse is moved while its button is pressed. Otherwise, it is used to get the reference of the Y axis object the mouse is hovering over.
The MouseDownEvent is used to initiate an axis pan operation.
The MouseWheel is used to perform a zoom on a Y axis.
And the MouseUpEvent is used to clean things up when zooming and panning operations are finished.
Here is the code:
// The axis that is currently hovered by the mouse
YAxis hoveredYAxis;
// The graphpane that contains the axis
GraphPane foundPane;
// The scale of the axis before it is panned
double movedYAxisMin;
double movedYAxisMax;
// The Y on the axis when the panning operation is starting
float movedYAxisStartY;
void z_MouseWheel(object sender, MouseEventArgs e)
{
if (hoveredYAxis != null)
{
var direction = e.Delta < 1 ? -.05f : .05f;
var increment = direction * (hoveredYAxis.Scale.Max - hoveredYAxis.Scale.Min);
var newMin = hoveredYAxis.Scale.Min + increment;
var newMax = hoveredYAxis.Scale.Max - increment;
hoveredYAxis.Scale.Min = newMin;
hoveredYAxis.Scale.Max = newMax;
foundPane.AxisChange();
z.Invalidate();
}
}
bool z_MouseUpEvent(ZedGraphControl sender, MouseEventArgs e)
{
hoveredYAxis = null;
return false;
}
bool z_MouseMoveEvent(ZedGraphControl sender, MouseEventArgs e)
{
var pt = e.Location;
if (e.Button == System.Windows.Forms.MouseButtons.Left)
{
if (hoveredYAxis != null)
{
var yOffset = hoveredYAxis.Scale.ReverseTransform(pt.Y) - hoveredYAxis.Scale.ReverseTransform(movedYAxisStartY);
hoveredYAxis.Scale.Min = movedYAxisMin - yOffset;
hoveredYAxis.Scale.Max = movedYAxisMax - yOffset;
sender.Invalidate();
return true;
}
}
else
{
var foundObject = findZedGraphObject(null);
hoveredYAxis = foundObject as YAxis;
if (hoveredYAxis != null)
{
z.Cursor = Cursors.SizeNS;
return true;
}
else
{
if (z.IsShowPointValues)
{
z.Cursor = Cursors.Cross;
return false;
}
else
{
z.Cursor = Cursors.Default;
return true;
}
}
}
return false;
}
bool z_MouseDownEvent(ZedGraphControl sender, MouseEventArgs e)
{
if (e.Button == System.Windows.Forms.MouseButtons.Left)
{
if (hoveredYAxis != null)
{
movedYAxisStartY = e.Location.Y;
movedYAxisMin = hoveredYAxis.Scale.Min;
movedYAxisMax = hoveredYAxis.Scale.Max;
return true;
}
}
return false;
}
This is a helper that factors out ZedGraph's object-finding operations a bit.
object findZedGraphObject(GraphPane pane = null)
{
var pt = zgc.PointToClient(Control.MousePosition);
if (pane == null)
{
foundPane = zgc.MasterPane.FindPane(pt);
if (foundPane != null)
{
object foundObject;
int forget;
using (var g = zgc.CreateGraphics())
if (foundPane.FindNearestObject(pt, g, out foundObject, out forget))
return foundObject;
}
}
return null;
}
If I understand your question correctly, here's my response:
ZedGraph has a built-in function called "Pan" with which you can change the scale of the x and y axes:
Place the cursor within the chart area.
Hold the 'Ctrl' key and move the mouse in the x and y directions to change the scale.
You can get back to the original state with 'Un-Pan' (context menu).
Cheers..:)
Do you want to create a scroll bar?
zedGraphControl1.IsShowHScrollbar = true;
//Set borders for the scale
zedGraphControl1.GraphPane.XAxis.Scale.Max = Xmax;
zedGraphControl1.GraphPane.XAxis.Scale.Min = Xmin;