I'm working on a Box2D jump-and-run game and want the player to be teleported back to the start when he hits a spike, but when he hits the spike I get a NullPointerException.
Here's my contact listener class:
public class WorldContactListener implements ContactListener {

    Player player;

    @Override
    public void beginContact(Contact contact) {
        Fixture fixA = contact.getFixtureA();
        Fixture fixB = contact.getFixtureB();
        player = new Player();
        int cDef = fixA.getFilterData().categoryBits | fixB.getFilterData().categoryBits;
        switch (cDef) {
            case HardwareRunner.PLAYER_BIT | HardwareRunner.BRICK_BIT:
            case HardwareRunner.PLAYER_BIT | HardwareRunner.SPIKE_BIT:
                player.die();
        }
    }
And here's the relevant part of my Player class:
public void definePlayer() {
    bdef.position.set(32 / runner.PPM, (6 * 32) / runner.PPM);
    bdef.type = BodyDef.BodyType.DynamicBody;
    b2body = world.createBody(bdef);

    FixtureDef fdef = new FixtureDef();
    PolygonShape shape = new PolygonShape();
    Vector2[] vertice = new Vector2[4];
    vertice[0] = new Vector2(-13, 13).scl(1 / runner.PPM);
    vertice[1] = new Vector2(13, 13).scl(1 / runner.PPM);
    vertice[2] = new Vector2(13, -13).scl(1 / runner.PPM);
    vertice[3] = new Vector2(-13, -13).scl(1 / runner.PPM);
    shape.set(vertice);
    shape.getRadius();

    fdef.filter.categoryBits = HardwareRunner.PLAYER_BIT;
    fdef.filter.maskBits =
            HardwareRunner.GROUND_BIT |
            HardwareRunner.COIN_BIT |
            HardwareRunner.BRICK_BIT |
            HardwareRunner.ENEMY_BIT |
            HardwareRunner.SPIKE_BIT |
            HardwareRunner.ENEMY_HEAD_BIT |
            HardwareRunner.ITEM_BIT;

    fdef.shape = shape;
    fdef.friction = .1f;
    b2body.createFixture(fdef).setUserData(this);

    fdef.isSensor = true;
    b2body.createFixture(fdef).setUserData(this);
}
public void die() {
    world.destroyBody(b2body);

    bdef.position.set(32 / runner.PPM, (6 * 32) / runner.PPM);
    bdef.type = BodyDef.BodyType.DynamicBody;
    b2body = world.createBody(bdef);

    FixtureDef fdef = new FixtureDef();
    PolygonShape shape = new PolygonShape();
    Vector2[] vertice = new Vector2[4];
    vertice[0] = new Vector2(-13, 13).scl(1 / runner.PPM);
    vertice[1] = new Vector2(13, 13).scl(1 / runner.PPM);
    vertice[2] = new Vector2(13, -13).scl(1 / runner.PPM);
    vertice[3] = new Vector2(-13, -13).scl(1 / runner.PPM);
    shape.set(vertice);
    shape.getRadius();

    fdef.filter.categoryBits = HardwareRunner.PLAYER_BIT;
    fdef.filter.maskBits =
            HardwareRunner.GROUND_BIT |
            HardwareRunner.COIN_BIT |
            HardwareRunner.BRICK_BIT |
            HardwareRunner.ENEMY_BIT |
            HardwareRunner.SPIKE_BIT |
            HardwareRunner.ENEMY_HEAD_BIT |
            HardwareRunner.ITEM_BIT;

    fdef.shape = shape;
    fdef.friction = .1f;
    b2body.createFixture(fdef).setUserData(this);

    fdef.isSensor = true;
    b2body.createFixture(fdef).setUserData(this);
}
And the error is:
Exception in thread "LWJGL Application" java.lang.NullPointerException
at de.tobls.hardwarerunner.Sprites.Player.die(Player.java:223)
at de.tobls.hardwarerunner.Tools.WorldContactListener.beginContact(WorldContactListener.java:27)
at com.badlogic.gdx.physics.box2d.World.beginContact(World.java:982)
at com.badlogic.gdx.physics.box2d.World.jniStep(Native Method)
at com.badlogic.gdx.physics.box2d.World.step(World.java:686)
at de.tobls.hardwarerunner.Screens.PlayScreen.update(PlayScreen.java:112)
at de.tobls.hardwarerunner.Screens.PlayScreen.render(PlayScreen.java:127)
at com.badlogic.gdx.Game.render(Game.java:46)
at de.tobls.hardwarerunner.HardwareRunner.render(HardwareRunner.java:71)
at com.badlogic.gdx.backends.lwjgl.LwjglApplication.mainLoop(LwjglApplication.java:215)
at com.badlogic.gdx.backends.lwjgl.LwjglApplication$1.run(LwjglApplication.java:120)
Can anyone help me?
First of all, you should not destroy the world's bodies from inside the contact listener's callbacks - they are invoked during the physics step, while the world is locked.
The proper way is to mark the body for destruction there and actually destroy it outside the step, e.g. right before the next world.step() call:
// in the contact callback: only mark the body
body.setUserData("DESTROY");

// when rendering, before world.step():
Array<Body> bodies = new Array<Body>(); // com.badlogic.gdx.utils.Array
world.getBodies(bodies);                // libGDX fills the passed Array
for (Body body : bodies)
    if ("DESTROY".equals(body.getUserData())) world.destroyBody(body);
But on top of this - I see that all you do in die() is move the body back to the start, so why not simply use setTransform:
body.setTransform(new Vector2(newX, newY), body.getAngle() );
instead of destroying the body and creating it again at the new position.
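Putting both ideas together, a minimal sketch could look like the one below. It assumes a static HardwareRunner.PPM constant and relies on the fact that definePlayer() already stores the Player in the fixture user data; resolvePlayer() is only a placeholder for that lookup.

// In the contact listener: only flag the player, never modify the world here.
@Override
public void beginContact(Contact contact) {
    int cDef = contact.getFixtureA().getFilterData().categoryBits
             | contact.getFixtureB().getFilterData().categoryBits;
    if (cDef == (HardwareRunner.PLAYER_BIT | HardwareRunner.SPIKE_BIT)) {
        Player player = resolvePlayer(contact); // placeholder: read the Player from the fixture user data
        player.markDead();
    }
}

// In Player: remember the hit and respawn outside the physics step.
private boolean dead;

public void markDead() {
    dead = true;
}

// Called from the screen's update(), before world.step():
public void update(float dt) {
    if (dead) {
        b2body.setTransform(32 / HardwareRunner.PPM, (6 * 32) / HardwareRunner.PPM, 0);
        b2body.setLinearVelocity(0, 0);
        dead = false;
    }
}

That way nothing is destroyed or created while the world is locked, and the player simply snaps back to the start position.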
I'm trying to set a dimension on the elements in an AssemblyInstance. The code operates with coordinates from the first element.
AssemblyInstance ass; // is found and is not null

ViewSection vsec = RevitAuxilaries.CreateAssemblyViewSection(uiapp, ass,
    AssemblyDetailViewOrientation.ElevationFront, ElementId.InvalidElementId, 25);
    // UIApplication, AssemblyInstance, AssemblyDetailViewOrientation, TemplateId, scale // created

BoundingBoxXYZ bbox1 = ass.get_BoundingBox(uiapp.ActiveUIDocument.ActiveView);
XYZ ptmid = (bbox1.Max + bbox1.Min) * 0.5;

Element cropboxelm = RevitAuxilaries.GetViewCropBox(uiapp, vsec); // finds the CropBox element // found
BoundingBoxXYZ bcropbox = vsec.CropBox;

XYZ center = new XYZ(ptmid.X, ptmid.Y, 0.5 * (bcropbox.Max.Z + bcropbox.Min.Z));
Line axis = Line.CreateBound(center, center + XYZ.BasisZ);
RevitAuxilaries.RotateElement2(uiapp, cropboxelm, axis, 0.6981); // UIApplication, Element, Line, angle // created

double dw = RevitAuxilaries.GetDimensionFromElement(uiapp, fi, Dimensions.enWidth); // found dw = 3.937

XYZ ptleft = new XYZ(31.501, -23.3878, 32.4803);
XYZ ptrght = new XYZ(31.501 + dw * Math.Cos(0.6981), -23.3878 + dw * Math.Sin(0.6981), 32.4803);
Line ln = RevitAuxilaries.CreateLineFromPoints(uiapp, ptleft, ptrght); // created

ReferenceArray refarr = new ReferenceArray();
refarr.Append(ln.GetEndPointReference(0));
refarr.Append(ln.GetEndPointReference(1));

Dimension dim = null;
using (Transaction trans = new Transaction(uiapp.ActiveUIDocument.Document, "CreADim"))
{
    trans.Start();
    dim = uiapp.ActiveUIDocument.Document.Create.NewDimension(vsec, ln, refarr);
    if (!issame)
    {
        try
        {
            dim.ValueOverride = Convert.ToInt32(UnitUtils.Convert(dim.Value.Value,
                UnitTypeId.Feet, UnitTypeId.Millimeters)).ToString();
        }
        catch { }
    }
    trans.Commit();
    uiapp.ActiveUIDocument.RefreshActiveView();
}
ERROR: The direction of dimension is invalid
Error in function checkDir, line 939
What is wrong here?
Have you tried to create the exact same dimension in the exact same context manually through the end user interface? Does that complete as expected? If not, what error message does that generate? If yes, you can analyse the resulting model, its elements and their properties using RevitLookup and possibly discover some required settings that can be added to your API approach.
I am currently trying to use the OptaPlanner libraries in a simple TimeWindowedVehicleRoutingSolution example. My code tries to be as simple as possible and mostly uses defaults, such as the stock vehicleRoutingSolverConfig.
public static void main(String[] args) {
    SolverFactory<TimeWindowedVehicleRoutingSolution> solverFactory =
            SolverFactory.createFromXmlResource("test/vehicleRoutingSolverConfig.xml");
    solverFactory.getSolverConfig();
    Solver<TimeWindowedVehicleRoutingSolution> solver = null;
    try {
        solver = solverFactory.buildSolver();
    } catch (Exception e) {
        System.out.println(e.toString());
    }

    // from 12/07/2018 @ 12:00pm to 12/07/2018 @ 12:30pm
    TimeWindowedCustomer hans = new TimeWindowedCustomer();
    hans.setReadyTime(1544184000);
    hans.setDueTime(1544185800);

    // from 12/07/2018 @ 11:00pm to 12/07/2018 @ 11:30pm
    TimeWindowedCustomer detlef = new TimeWindowedCustomer();
    detlef.setReadyTime(1544180400);
    detlef.setDueTime(1544182200);

    RoadLocation hansRoad = new RoadLocation();
    RoadLocation detlefRoad = new RoadLocation();
    RoadLocation hubRoad = new RoadLocation();

    Map<RoadLocation, Double> hansMap = new HashMap<RoadLocation, Double>();
    // 10min
    hansMap.put(detlefRoad, 0.6);
    // 15min
    hansMap.put(hubRoad, 0.9);
    hansRoad.setTravelDistanceMap(hansMap);

    Map<RoadLocation, Double> detlefMap = new HashMap<RoadLocation, Double>();
    // 10min
    detlefMap.put(hansRoad, 0.6);
    // 20min
    detlefMap.put(hubRoad, 1.2);
    detlefRoad.setTravelDistanceMap(detlefMap);

    Map<RoadLocation, Double> hubMap = new HashMap<RoadLocation, Double>();
    // 15min
    hubMap.put(hansRoad, 0.9);
    // 20min
    hubMap.put(detlefRoad, 1.2);
    hubRoad.setTravelDistanceMap(hubMap);

    TimeWindowedDepot hub = new TimeWindowedDepot();
    hub.setLocation(hubRoad);
    hans.setLocation(hansRoad);
    detlef.setLocation(detlefRoad);

    Vehicle vehicle = new Vehicle();
    vehicle.setDepot(hub);

    List<Customer> customers = new ArrayList<Customer>();
    customers.add(detlef);
    customers.add(hans);
    List<Depot> depots = new ArrayList<Depot>();
    depots.add(hub);
    List<Vehicle> vehicles = new ArrayList<Vehicle>();
    vehicles.add(vehicle);
    List<Location> locations = new ArrayList<Location>();
    locations.add(hansRoad);
    locations.add(detlefRoad);
    locations.add(hubRoad);

    hans.setId(1L);
    detlef.setId(2L);
    hub.setId(3L);
    vehicle.setId(4L);
    hansRoad.setId(5L);
    detlefRoad.setId(6L);
    hubRoad.setId(7L);

    TimeWindowedVehicleRoutingSolution problem = new TimeWindowedVehicleRoutingSolution();
    problem.setCustomerList(customers);
    problem.setDepotList(depots);
    problem.setVehicleList(vehicles);
    problem.setLocationList(locations);
    problem.setDistanceType(DistanceType.ROAD_DISTANCE);

    TimeWindowedVehicleRoutingSolution solution = solver.solve(problem);

    for (Customer c : solution.getCustomerList()) {
        TimeWindowedCustomer tc = (TimeWindowedCustomer) c;
        System.out.println(tc.getArrivalTime());
    }
}
The problem I am now facing is that the time window isn't really treated as a hard rule, and therefore the suggested route arrives before the time window opens. I think I am just missing breaks that the drivers could take, and from looking at the code I would think that breaks are possible, just not with my configuration. So the question is: how do I get breaks into my example?
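For reference, a quick way to see the violation is to compare each customer's arrival time with its window after solving - a rough sketch, assuming getReadyTime()/getDueTime() getters exist alongside the setters used above:

for (Customer c : solution.getCustomerList()) {
    TimeWindowedCustomer tc = (TimeWindowedCustomer) c;
    Long arrival = tc.getArrivalTime();
    if (arrival == null) {
        System.out.println(tc.getId() + " is not assigned to a vehicle");
    } else if (arrival < tc.getReadyTime()) {
        System.out.println(tc.getId() + " arrives before its window opens");
    } else if (arrival > tc.getDueTime()) {
        System.out.println(tc.getId() + " arrives after its window closes");
    } else {
        System.out.println(tc.getId() + " arrives inside its window");
    }
}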
I have a camera as a child of a Group object and I need to get the origin/direction:
var cameraRig = new THREE.Group();
cameraRig.add( cameraPerspective );
cameraRig.add( cameraOrtho );
scene.add( cameraRig );
function relativeMousePosition() {
    var canvasBoundingBox = renderer.domElement.getBoundingClientRect();
    var mouse3D = new THREE.Vector3(0, 0, 0.5);
    mouse3D.x = ((mouseX - 0) / canvasBoundingBox.width) * 2 - 1;
    mouse3D.y = -((mouseY - 0) / canvasBoundingBox.height) * 2 + 1;
    return mouse3D;
}
cameraRig.position.set(89,34,91);
cameraRig.lookAt(something.position);
cameraPerspective.position.set(123,345,123);
var dir = relativeMousePosition().unproject(camera).sub(cameraPerspective.position).normalize();
var origin = cameraPerspective.position;
The above code gives an origin + direction in the context of the cameraRig. When I take the camera out of the rig, so that the scene is its direct parent, I get the world origin/direction, which is what I want. So how do I incorporate the cameraRig and still get the world origin/direction, so I can do picking or whatever?
FIDDLE: https://jsfiddle.net/647qzhab/1/
UPDATE:
As mentioned in the comment by Falk:
var dir = relativeMousePosition().unproject(camera).sub(cameraPerspective.getWorldPosition()).normalize();
var origin = cameraPerspective.getWorldPosition();
The result is better, but not yet fully satisfying, as the camera rotation does not seem to be applied yet.
I need to update matrixWorld for the cameraRig:
cameraRig.position.set(89,34,91);
cameraRig.lookAt(something.position);
cameraPerspective.position.set(123,345,123);
cameraRig.updateMatrixWorld(true);
cameraPerspective.updateMatrixWorld(true);
Here is the situation. In my app I have an overlay layer that is composed of a transparent PNG. I have replaced the hit area for the PNG with a 1x1 image using the following code:
[Bindable]
[Embed(source = "/assets/1x1image.png")]
private var onexonebitmapClass:Class;
private function loadCompleteHandler(event:Event):void
{
    // Create the bitmap
    var onexonebitmap:BitmapData = new onexonebitmapClass().bitmapData;
    var bitmap:Bitmap;
    bitmap = event.target.content as Bitmap;
    bitmap.smoothing = true;

    var _hitarea:Sprite = createHitArea(onexonebitmap, 1);

    var rect:flash.geom.Rectangle = _box.toFlexRectangle(sprite.width, sprite.height);
    var drawnBox:Sprite = new FlexSprite();

    bitmap.width = rect.width;
    bitmap.height = rect.height;
    bitmap.x = -loader.width / 2;
    bitmap.y = -loader.height / 2;
    bitmap.alpha = _alpha;
    _hitarea.alpha = 0;

    drawnBox.x = rect.x + rect.width / 2;
    drawnBox.y = rect.y + rect.height / 2;

    // Add the bitmap as a child to the drawnBox
    drawnBox.addChild(bitmap);
    // Rotate the object.
    drawnBox.rotation = _rotation;
    // Add the drawnBox to the sprite
    sprite.addChild(drawnBox);
    // Set the hitarea to drawnBox
    drawnBox.hitArea = _hitarea;
}

private function createHitArea(bitmapData:BitmapData, grainSize:uint = 1):Sprite
{
    var _hitarea:Sprite = new Sprite();
    _hitarea.graphics.beginFill(0x900000, 1.0);

    for (var x:uint = 0; x < bitmapData.width; x += grainSize)
    {
        for (var y:uint = grainSize; y < bitmapData.height; y += grainSize)
        {
            if (x <= bitmapData.width && y <= bitmapData.height && bitmapData.getPixel(x, y) != 0)
            {
                _hitarea.graphics.drawRect(x, y, grainSize, grainSize);
            }
        }
    }

    _hitarea.graphics.endFill();
    return _hitarea;
}
This is based on the work done here: Creating a hitarea for PNG Image with transparent (alpha) regions in Flex
Using the above code I am able to basically ignore the overlay layer for all mouse events (click, double click, move, etc.). However, I am unable to capture the right click (context menu) event for items that are beneath the overlay.
For instance, I have a spell check component that checks the spelling of any text item. Like most spell checkers, if a word is incorrect or not in the dictionary it underlines the word in red, and right clicking on it gives you a list of suggestions in the context menu. This works great when the text box is not under the overlay, but if the text box is under the overlay I get nothing back.
If anyone can give me some pointers on how to capture the right click event on a text item that sits under a transparent PNG, that would be great.
I am trying to use manipulation on a UI element (a rectangle) and can rotate and translate it without problems. What I would like to achieve is to make another UI element (an ellipse, for example) follow the first (the rectangle).
If I apply the same transform group that I used for the rectangle to the ellipse, translation manipulation works fine, but during rotation the ellipse does not follow the rectangle.
I think I somehow have to find a suitable composite transform center point for the ellipse, but I cannot figure out how.
Here is the corresponding sample code.
public MainPage()
{
    this.InitializeComponent();

    rectMy.ManipulationMode = ManipulationModes.None | ManipulationModes.TranslateX |
        ManipulationModes.TranslateY | ManipulationModes.Rotate;
    rectMy.ManipulationStarted += rectMy_ManipulationStarted;
    rectMy.ManipulationDelta += rectMy_ManipulationDelta;
    rectMy.ManipulationCompleted += rectMy_ManipulationCompleted;

    transformGroup.Children.Add(previousTransform);
    transformGroup.Children.Add(compositeTransform);
    rectMy.RenderTransform = transformGroup;
}

void rectMy_ManipulationCompleted(object sender, ManipulationCompletedRoutedEventArgs e)
{
    e.Handled = true;
}

void rectMy_ManipulationDelta(object sender, ManipulationDeltaRoutedEventArgs e)
{
    previousTransform.Matrix = transformGroup.Value;

    Point center = previousTransform.TransformPoint(new Point(rectMy.Width / 2, rectMy.Height / 2));
    compositeTransform.CenterX = center.X;
    compositeTransform.CenterY = center.Y;
    compositeTransform.Rotation = e.Delta.Rotation;
    compositeTransform.ScaleX = compositeTransform.ScaleY = e.Delta.Scale;
    compositeTransform.TranslateX = e.Delta.Translation.X;
    compositeTransform.TranslateY = e.Delta.Translation.Y;
}

void rectMy_ManipulationStarted(object sender, ManipulationStartedRoutedEventArgs e)
{
    e.Handled = true;
}
OK, I understand it better now and have found the solution. It is all about the center point of the composite transform (as I initially guessed). For the center of the ellipse, I had to feed in the center of the rectangle; however, the coordinate needs to be given relative to the ellipse. In my case the ellipse is at the upper right corner of the rectangle, so below is what I passed as the composite transform center.
Point centerE = previousTransformE.TransformPoint(new Point(-rectMy.Width / 2 + ellipseMy.Width / 2, rectMy.Height / 2 + ellipseMy.Height / 2));
For the rectangle, the center point of the composite transform was:
Point center = previousTransform.TransformPoint(new Point(rectMy.Width / 2, rectMy.Height / 2));
Stack Overflow does not allow me to post an image to better visualize things. Sorry!
The whole code:
previousTransform.Matrix = transformGroup.Value;
previousTransformE.Matrix = transformGroupE.Value;
Point center = previousTransform.TransformPoint(new Point(rectMy.Width / 2, rectMy.Height / 2));
compositeTransform.CenterX = center.X;
compositeTransform.CenterY = center.Y;
compositeTransform.Rotation = e.Delta.Rotation;
compositeTransform.TranslateX = e.Delta.Translation.X;
compositeTransform.TranslateY = e.Delta.Translation.Y;
Point centerE = previousTransformE.TransformPoint(new Point(-rectMy.Width / 2 + ellipseMy.Width / 2, rectMy.Height / 2 + ellipseMy.Height / 2));
compositeTransformE.CenterX = centerE.X;
compositeTransformE.CenterY = centerE.Y;
compositeTransformE.Rotation = e.Delta.Rotation;
compositeTransformE.TranslateX = e.Delta.Translation.X;
compositeTransformE.TranslateY = e.Delta.Translation.Y;