Geotools - Draw Features on WMS and OSM - gdal

I am trying to overlay a shapefile on a WMS layer, following the example at http://docs.geotools.org/latest/userguide/tutorial/raster/image.html.
I keep getting this error:
Exception in thread "main" java.lang.UnsupportedOperationException: Trying to get a reader from an unknown format.
at org.geotools.coverage.grid.io.UnknownFormat.getReader(UnknownFormat.java:62)
at com.qedrix.map.maplotr.Demo1.displayLayers(Demo1.java:121)
at com.qedrix.map.maplotr.Demo1.main(Demo1.java:229)
when the code tries to read the WMS image.
My code looks like this:
public class Demo1 {
private AbstractGridCoverage2DReader reader = null;
private StyleFactory sf = CommonFactoryFinder.getStyleFactory();
private FilterFactory2 ff = CommonFactoryFinder.getFilterFactory2();
/**
* This method examines the names of the sample dimensions in the provided
* coverage looking for "red...", "green..." and "blue..." (case insensitive
* match). If these names are not found it uses bands 1, 2, and 3 for the
* red, green and blue channels. It then sets up a raster symbolizer and
* returns this wrapped in a Style.
*
* @return a new Style object containing a raster symbolizer set up for RGB
* image
*/
private Style createRGBStyle() {
GridCoverage2D cov = null;
try {
cov = reader.read(null);
} catch (IOException giveUp) {
throw new RuntimeException(giveUp);
}
// We need at least three bands to create an RGB style
int numBands = cov.getNumSampleDimensions();
if (numBands < 3) {
return null;
}
// Get the names of the bands
String[] sampleDimensionNames = new String[numBands];
for (int i = 0; i < numBands; i++) {
GridSampleDimension dim = cov.getSampleDimension(i);
sampleDimensionNames[i] = dim.getDescription().toString();
}
final int RED = 0, GREEN = 1, BLUE = 2;
int[] channelNum = { -1, -1, -1 };
// We examine the band names looking for "red...", "green...",
// "blue...".
// Note that the channel numbers we record are indexed from 1, not 0.
for (int i = 0; i < numBands; i++) {
String name = sampleDimensionNames[i].toLowerCase();
if (name != null) {
if (name.matches("red.*")) {
channelNum[RED] = i + 1;
} else if (name.matches("green.*")) {
channelNum[GREEN] = i + 1;
} else if (name.matches("blue.*")) {
channelNum[BLUE] = i + 1;
}
}
}
// If we didn't find named bands "red...", "green...", "blue..."
// we fall back to using the first three bands in order
if (channelNum[RED] < 0 || channelNum[GREEN] < 0 || channelNum[BLUE] < 0) {
channelNum[RED] = 1;
channelNum[GREEN] = 2;
channelNum[BLUE] = 3;
}
// Now we create a RasterSymbolizer using the selected channels
SelectedChannelType[] sct = new SelectedChannelType[cov.getNumSampleDimensions()];
ContrastEnhancement ce = sf.contrastEnhancement(ff.literal(1.0), ContrastMethod.NORMALIZE);
for (int i = 0; i < 3; i++) {
sct[i] = sf.createSelectedChannelType(String.valueOf(channelNum[i]), ce);
}
RasterSymbolizer sym = sf.getDefaultRasterSymbolizer();
ChannelSelection sel = sf.channelSelection(sct[RED], sct[GREEN], sct[BLUE]);
sym.setChannelSelection(sel);
return SLD.wrapSymbolizers(sym);
}
public void displayLayers() {
File rasterFile = fetchWmsImage();
AbstractGridFormat format = GridFormatFinder.findFormat(rasterFile);
this.reader = format.getReader(rasterFile);
// Build an RGB style for the raster using the data from its bands
Style rasterStyle = createRGBStyle();
// Create a basic point style for the shapefile features
Style shpStyle = SLD.createPointStyle("point", Color.YELLOW, Color.GRAY, 0.0f, 1.5f);
MapContent map = new MapContent();
map.setTitle("ImageLab");
MapViewport vp = new MapViewport();
org.geotools.map.Layer rasterLayer = new GridReaderLayer(reader, rasterStyle);
map.addLayer(rasterLayer);
saveImage(map, "final.jpeg", 583);
}
public File fetchWmsImage() {
URL url = null;
try {
url = new URL("http://184.106.187.247:8080/geoserver/rg/wms?version=1.1.0");
} catch (MalformedURLException e) {
// will not happen
}
WebMapServer wms = null;
try {
wms = new WebMapServer(url);
WMSCapabilities capabilities = wms.getCapabilities();
Layer[] layers = WMSUtils.getNamedLayers(capabilities);
GetMapRequest request = wms.createGetMapRequest();
request.setFormat("image/png");
request.setDimensions("583", "420");
request.setTransparent(true);
request.setSRS("EPSG:900913");
request.setBBox("-13019428.542822,3922163.1648461,-13013051.407366,3929863.8567165");
request.setProperty("isBaseLayer", "false");
request.setProperty("opacity", ".2");
for (Layer layer : WMSUtils.getNamedLayers(capabilities)) {
if (layer.getName().equals("rg:parcels"))
request.addLayer(layer);
}
GetMapResponse response = (GetMapResponse) wms.issueRequest(request);
BufferedImage image = ImageIO.read(response.getInputStream());
File rasterFile = new File("C:\\Users\\samabhik\\Workspace\\MAP\\data\\out.png");
ImageIO.write(image, "png", rasterFile);
return rasterFile;
} catch (ServiceException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} finally {
}
return null;
}
public void saveImage(final MapContent map, final String file, final int imageWidth) {
GTRenderer renderer = new StreamingRenderer();
renderer.setMapContent(map);
Rectangle imageBounds = null;
ReferencedEnvelope mapBounds = null;
try {
mapBounds = map.getMaxBounds();
double heightToWidth = mapBounds.getSpan(1) / mapBounds.getSpan(0);
imageBounds = new Rectangle(0, 0, imageWidth, (int) Math.round(imageWidth * heightToWidth));
} catch (Exception e) {
// failed to access map layers
throw new RuntimeException(e);
}
BufferedImage image = new BufferedImage(imageBounds.width, imageBounds.height, BufferedImage.TYPE_INT_RGB);
Graphics2D gr = image.createGraphics();
gr.setPaint(Color.WHITE);
gr.fill(imageBounds);
try {
renderer.paint(gr, imageBounds, mapBounds);
File fileToSave = new File(file);
ImageIO.write(image, "jpeg", fileToSave);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static void main(String[] args) {
Demo1 demo = new Demo1();
demo.displayLayers();
}
}
My pom dependency looks like this:
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-shapefile</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-swing</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-epsg-hsql</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-geotiff</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-image</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-wms</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-coverage</artifactId>
<version>${geotools.version}</version>
</dependency>
Somewhere I read that it could be a GDAL issue, but I couldn't figure out how to resolve it. I am using Eclipse on 64-bit JDK 1.6 and Windows 7 amd64.
Please help, someone.
I just tested the image file using gdalinfo.exe from the GDAL native libraries. Here's the report:
Driver: PNG/Portable Network Graphics
Files: ..\..\Workspace\MAP\data\out2.png
Size is 583, 420
Coordinate System is `'
Image Structure Metadata:
INTERLEAVE=PIXEL
Corner Coordinates:
Upper Left ( 0.0, 0.0)
Lower Left ( 0.0, 420.0)
Upper Right ( 583.0, 0.0)
Lower Right ( 583.0, 420.0)
Center ( 291.5, 210.0)
Band 1 Block=583x1 Type=Byte, ColorInterp=Red
Mask Flags: PER_DATASET ALPHA
Band 2 Block=583x1 Type=Byte, ColorInterp=Green
Mask Flags: PER_DATASET ALPHA
Band 3 Block=583x1 Type=Byte, ColorInterp=Blue
Mask Flags: PER_DATASET ALPHA
Band 4 Block=583x1 Type=Byte, ColorInterp=Alpha
FURTHER UPDATES
I just tried changing the WMS output format from image/png to image/geotiff, and it now partially works (the final image generated by GeoTools is black & white). Why is this happening? And why didn't it work with PNG?

Check that you have JAI and ImageIO available, either via Maven dependencies or by installing them as Java extensions as outlined in the Quickstart:
http://docs.geotools.org/latest/userguide/tutorial/quickstart/eclipse.html
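If you go the Maven route, the JAI and ImageIO dependencies usually look like the following (these coordinates are an assumption based on typical GeoTools setups of this era; they normally resolve from the OSGeo repository, so verify them against your geotools.version):
<!-- assumed JAI / JAI ImageIO coordinates; verify the versions against your GeoTools release -->
<dependency>
<groupId>javax.media</groupId>
<artifactId>jai_core</artifactId>
<version>1.1.3</version>
</dependency>
<dependency>
<groupId>com.sun.media</groupId>
<artifactId>jai_codec</artifactId>
<version>1.1.3</version>
</dependency>
<dependency>
<groupId>com.sun.media</groupId>
<artifactId>jai_imageio</artifactId>
<version>1.1</version>
</dependency>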
Update:
I was able to catch up on the GeoTools IRC channel and confirm this is an environment issue. In classic works-for-me fashion, I was able to connect to the WMS in question using the WMSLab tutorial example and display results.
So let's check out the "problem" environment:
System.out.println( GeoTools.getAboutInfo() );
Result:
GeoTools version 9-SNAPSHOT (built from ree5a6830d2c774ee9a4eb9e024d989c2a1bcdfe3)
Java version: 1.7.0_09
Operating system: Windows 7 6.1
GeoTools jars on classpath:
A couple ideas:
Check out the ImageLab tutorial to confirm JAI / ImageIO is available?
This worked!
Save the image retrieved from the browser and try loading it using straight-up Java.
Example from WorldImageReader:
File input = ...
ImageInputStreamSpi inStreamSPI = ImageIOExt.getImageInputStreamSPI(input);
if (inStreamSPI == null) throw new IllegalStateException("Unsupported");
Apparently this was not successful?
GeoTools has not gone through QA on Java 7 yet, downgrade to Java 6?
Recently a Java 7 build box has been volunteered. When GeoTools works in Java 7 the release notes and tutorials will be updated.
Especially on Windows, the native implementation of PNG support is suspect.
The following code from the uDig project disables the native codec, allowing the pure Java implementation to have a crack at it:
if (Platform.getOS().equals(Platform.OS_WIN32)) {
try {
// PNG native support is not very good .. this turns it off
ImageUtilities.allowNativeCodec("png", ImageReaderSpi.class, false); //$NON-NLS-1$
} catch (Throwable t) {
// we should not die if JAI is missing; we have a warning for that...
System.out.println("Difficulty turnning windows native PNG support (which will result in scrambled images from WMS servers)"); //$NON-NLS-1$
t.printStackTrace();
}
}
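As for why the plain PNG produced UnknownFormat: GridFormatFinder matches a file to a coverage format, and a coverage needs georeferencing. A GeoTIFF carries its bounds and CRS inside the file, so gt-geotiff accepts it; a bare PNG carries neither (your gdalinfo report above shows an empty Coordinate System for exactly this reason), and gt-image's WorldImageFormat will generally only accept a PNG when a world file sits next to it. Below is a minimal sketch, under the assumption that out.png came from the GetMap request in the question (583x420, the EPSG:900913 bbox), that writes such a .pgw; a .prj sidecar with the CRS may also be needed:
// Assumption: out.png was produced by the GetMap request shown in the question.
// A world file (.pgw) lets gt-image's WorldImageFormat georeference the PNG.
double minx = -13019428.542822, maxx = -13013051.407366;
double miny = 3922163.1648461, maxy = 3929863.8567165;
int width = 583, height = 420;
double xRes = (maxx - minx) / width;   // ground units per pixel, x
double yRes = (maxy - miny) / height;  // ground units per pixel, y
PrintWriter pgw = new PrintWriter("C:\\Users\\samabhik\\Workspace\\MAP\\data\\out.pgw");
pgw.println(xRes);            // pixel size in x
pgw.println(0.0);             // rotation (row)
pgw.println(0.0);             // rotation (column)
pgw.println(-yRes);           // negative pixel size in y (image origin is top-left)
pgw.println(minx + xRes / 2); // x of the center of the upper-left pixel
pgw.println(maxy - yRes / 2); // y of the center of the upper-left pixel
pgw.close();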

Related

JavaFx Problem with Service and Task with different Parameters and multiple calls

I need to create a Service and a Task to calculate the Mandelbrot and Julia sets.
The calculation works pretty well; now we need to wrap it in a Service and run it inside the Task.
The problem I have is that the parameters are different every time the task is executed.
I wrote a little setter function to pass those arguments to the Service first, but I'm not sure if this is the right way.
I'm also not sure how to correctly call the service in the main function.
Right now it works like this: the first time the service is executed, everything seems to work, but when I call it a second time, nothing seems to happen.
Just to make sure: can a Service execute the Task multiple times at the same time?
Is that also possible with different parameters?
The Task code:
private MandelbrotRenderOptions mandelbrot;
private JuliaRenderOptions juliaset;
private Canvas leftCanvas;
private Canvas rightCanvas;
//Constructor
public RenderTask(Canvas leftCanvas, Canvas rightCanvas) {
this.leftCanvas = leftCanvas;
this.rightCanvas = rightCanvas;
}
public void setOptions(MandelbrotRenderOptions mandelbrot, JuliaRenderOptions juliaset) {
this.mandelbrot = mandelbrot;
this.juliaset = juliaset;
}
@Override
protected Void call() throws Exception {
try {
System.out.println("HALLO SERVICE");
// instances for rendering the left canvas [pixel.data -> PixelWriter -> WritableImage -> GraphicsContext -> Canvas]
// creates an writable image which contains the dimensions of the canvas
WritableImage wimLeftCanvas = new WritableImage((int) leftCanvas.getWidth(), (int) leftCanvas.getHeight());
// instance which can write data into the image (instance above)
PixelWriter pwLeftCanvas = wimLeftCanvas.getPixelWriter();
// instance which fills the canvas
GraphicsContext gcLeftCanvas = leftCanvas.getGraphicsContext2D();
// instances for rendering the right canvas [pixel.data -> PixelWriter -> WritableImage -> GraphicsContext -> Canvas]
WritableImage wimRightCanvas = new WritableImage((int) rightCanvas.getWidth(), (int) rightCanvas.getHeight());
PixelWriter pwRight = wimRightCanvas.getPixelWriter();
GraphicsContext gcRightCanvas = rightCanvas.getGraphicsContext2D();
gcLeftCanvas.clearRect(0, 0, leftCanvas.getWidth(), leftCanvas.getHeight());
//Pixel[][] pixels; // contains pixel data for rendering canvas
// instances for logging the rendered data
SimpleImage simpleImageLeftCanvas = new SimpleImage((int) leftCanvas.getWidth(), (int) leftCanvas.getHeight());
SimpleImage simpleImageRightCanvas = new SimpleImage((int) rightCanvas.getWidth(), (int) rightCanvas.getHeight());
short dataSimpleImage[] = new short[3]; // contains pixel data for logging rendered data
// fills left canvas (mandelbrot) PixelWriter instance with data
Pixel[][] pixels = mandelbrot.setAllPixels();
FractalLogger.logRenderCall(mandelbrot);
for (int y = 0; y < (int) leftCanvas.getHeight(); y++) {
for (int x = 0; x < (int) leftCanvas.getWidth(); x++) {
// writes color data to the PixelWriter instance
Color color = Color.rgb(pixels[y][x].getRed(), pixels[y][x].getGreen(), pixels[y][x].getBlue());
pwLeftCanvas.setColor(x, y, color);
for (int depth = 0; depth < 3; depth++) {
if (depth == 0) {
dataSimpleImage[depth] = pixels[y][x].getRed();
} else if (depth == 1) {
dataSimpleImage[depth] = pixels[y][x].getGreen();
} else {
dataSimpleImage[depth] = pixels[y][x].getBlue();
}
}
try {
simpleImageLeftCanvas.setPixel(x, y, dataSimpleImage); // because data must not be null
} catch (InvalidDepthException e) {
e.printStackTrace();
}
}
}
// logs that rendering of mandelbrot is finished
FractalLogger.logRenderFinished(FractalType.MANDELBROT, simpleImageLeftCanvas);
// fills left canvas (juliaset) PixelWriter instance with data
pixels = juliaset.setAllPixels();
FractalLogger.logRenderCall(mandelbrot);
for (int y = 0; y < (int) rightCanvas.getHeight(); y++) {
for (int x = 0; x < (int) rightCanvas.getWidth(); x++) {
// writes color data to the PixelWriter instance
Color color = Color.rgb(pixels[y][x].getRed(), pixels[y][x].getGreen(), pixels[y][x].getBlue());
pwRight.setColor(x, y, color);
for (int depth = 0; depth < 3; depth++) {
if (depth == 0) {
dataSimpleImage[depth] = pixels[y][x].getRed();
} else if (depth == 1) {
dataSimpleImage[depth] = pixels[y][x].getGreen();
} else {
dataSimpleImage[depth] = pixels[y][x].getBlue();
}
}
try {
simpleImageRightCanvas.setPixel(x, y, dataSimpleImage); // because data must not be null
} catch (InvalidDepthException e) {
e.printStackTrace();
}
}
}
// logs that rendering of juliaset is finished
FractalLogger.logRenderFinished(FractalType.JULIA, simpleImageRightCanvas);
// writes data from the WritableImage instance to the GraphicsContext instance, which finally renders it onto the canvas
gcLeftCanvas.drawImage(wimLeftCanvas, 0, 0);
FractalLogger.logDrawDone(FractalType.MANDELBROT);
gcRightCanvas.drawImage(wimRightCanvas, 0, 0);
FractalLogger.logDrawDone(FractalType.JULIA);
return null;
} catch (Exception e) {
System.out.println("ERROR");
System.out.println(e);
return null;
}
}
@Override
protected void cancelled()
{
super.cancelled();
updateMessage("The task was cancelled.");
}
@Override
protected void failed()
{
super.failed();
updateMessage("The task failed.");
}
@Override
public void succeeded()
{
super.succeeded();
updateMessage("The task finished successfully.");
}
And I call it like this in main:
Service service = new Service() {
@Override
protected Task createTask() {
return new RenderTask(leftCanvas, rightCanvas);
}
};
task.setOptions(mandelbrot, juliaset);
service.restart();
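For what it's worth: a Service runs at most one Task at a time, and restart() cancels any running task before calling createTask() again, so fresh parameters are normally picked up inside createTask() rather than set on a task instance created elsewhere. A minimal sketch under that assumption, reusing the RenderTask and fields from above:
Service<Void> service = new Service<Void>() {
    @Override
    protected Task<Void> createTask() {
        // runs on every start()/restart(), so the current options are read here
        RenderTask task = new RenderTask(leftCanvas, rightCanvas);
        task.setOptions(mandelbrot, juliaset);
        return task;
    }
};
service.restart(); // cancels the previous run (if any), then creates and starts a fresh task
Note also that drawing to a Canvas from a background thread is itself suspect; the usual approach is to compute the pixel data in the task and hand the drawImage calls back to the FX thread (e.g. via Platform.runLater).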

What is the optimal render loop in Dart 2?

I am looking for ideas regarding an optimal/minimal structure for the inner render loop in Dart 2, for a 2d game (if that part matters).
Clarification / Explanation: Every framework / language has an efficient way to:
1) Deal with time.
2) Render to the screen (via memory, a canvas, an image, or whatever).
For example, here is someone who answered this for C#. Being new to Flutter / Dart, my first attempt (below) is failing to work, and as of right now I cannot tell where the problem is.
I have searched high and low without finding any help on this, so if you can assist, you have my eternal gratitude.
There is a post on Reddit by u/inu-no-policemen (a bit old) that I used as a starting point. I suspect that it is thrashing the garbage collector or leaking memory.
This is what I have so far, but it crashes pretty quickly (at least in the debugger):
import 'dart:ui';
import 'dart:typed_data';
import 'dart:math' as math;
import 'dart:async';
main() async {
var deviceTransform = new Float64List(16)
..[0] = 1.0 // window.devicePixelRatio
..[5] = 1.0 // window.devicePixelRatio
..[10] = 1.0
..[15] = 1.0;
var previous = Duration.zero;
var initialSize = await Future<Size>(() {
if (window.physicalSize.isEmpty) {
var completer = Completer<Size>();
window.onMetricsChanged = () {
if (!window.physicalSize.isEmpty) {
completer.complete(window.physicalSize);
}
};
return completer.future;
}
return window.physicalSize;
});
var world = World(initialSize.width / 2, initialSize.height / 2);
window.onBeginFrame = (now) {
// we rebuild the screenRect here since it can change
var screenRect = Rect.fromLTWH(0.0, 0.0, window.physicalSize.width, window.physicalSize.height);
var recorder = PictureRecorder();
var canvas = Canvas(recorder, screenRect);
var delta = previous == Duration.zero ? Duration.zero : now - previous;
previous = now;
var t = delta.inMicroseconds / Duration.microsecondsPerSecond;
world.update(t);
world.render(t, canvas);
var builder = new SceneBuilder()
..pushTransform(deviceTransform)
..addPicture(Offset.zero, recorder.endRecording())
..pop();
window.render(builder.build());
window.scheduleFrame();
};
window.scheduleFrame();
window.onPointerDataPacket = (packet) {
var p = packet.data.first;
world.input(p.physicalX, p.physicalY);
};
}
class World {
static var _objectColor = Paint()..color = Color(0xa0a0a0ff);
static var _s = 200.0;
static var _objectRect = Rect.fromLTWH(-_s / 2, -_s / 2, _s, _s);
static var _rotationsPerSecond = 0.25;
var _turn = 0.0;
double _x;
double _y;
World(this._x, this._y);
void input(double x, double y) { _x = x; _y = y; }
void update(double t) { _turn += t * _rotationsPerSecond; }
void render(double t, Canvas canvas) {
var tau = math.pi * 2;
canvas.translate(_x, _y);
canvas.rotate(tau * _turn);
canvas.drawRect(_objectRect, _objectColor);
}
}
Well, after a month of beating my face against this, I finally figured out the right question and that got me to this:
Flutter Layers / Raw
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This example shows how to perform a simple animation using the raw interface
// to the engine.
import 'dart:math' as math;
import 'dart:typed_data';
import 'dart:ui' as ui;
void beginFrame(Duration timeStamp) {
// The timeStamp argument to beginFrame indicates the timing information we
// should use to clock our animations. It's important to use timeStamp rather
// than reading the system time because we want all the parts of the system to
// coordinate the timings of their animations. If each component read the
// system clock independently, the animations that we processed later would be
// slightly ahead of the animations we processed earlier.
// PAINT
final ui.Rect paintBounds = ui.Offset.zero & (ui.window.physicalSize / ui.window.devicePixelRatio);
final ui.PictureRecorder recorder = ui.PictureRecorder();
final ui.Canvas canvas = ui.Canvas(recorder, paintBounds);
canvas.translate(paintBounds.width / 2.0, paintBounds.height / 2.0);
// Here we determine the rotation according to the timeStamp given to us by
// the engine.
final double t = timeStamp.inMicroseconds / Duration.microsecondsPerMillisecond / 1800.0;
canvas.rotate(math.pi * (t % 1.0));
canvas.drawRect(ui.Rect.fromLTRB(-100.0, -100.0, 100.0, 100.0),
ui.Paint()..color = const ui.Color.fromARGB(255, 0, 255, 0));
final ui.Picture picture = recorder.endRecording();
// COMPOSITE
final double devicePixelRatio = ui.window.devicePixelRatio;
final Float64List deviceTransform = Float64List(16)
..[0] = devicePixelRatio
..[5] = devicePixelRatio
..[10] = 1.0
..[15] = 1.0;
final ui.SceneBuilder sceneBuilder = ui.SceneBuilder()
..pushTransform(deviceTransform)
..addPicture(ui.Offset.zero, picture)
..pop();
ui.window.render(sceneBuilder.build());
// After rendering the current frame of the animation, we ask the engine to
// schedule another frame. The engine will call beginFrame again when it's time
// to produce the next frame.
ui.window.scheduleFrame();
}
void main() {
ui.window.onBeginFrame = beginFrame;
ui.window.scheduleFrame();
}

Remove underlines from text in PDF file

I have a bunch of PDF files with broken links.
I need to remove those links and right now I can do the following:
Remove link actions
Change text color from blue to black
What I can't do is to remove blue underlines below text that was a link before.
I tried several PDF libraries for .NET (because this is my primary platform):
Aspose.PDF
PDFSharp
ceTe DynamicPDF
PDFBox
You are welcome to recommend a solution in any programming language, platform, or library. I just need to get this done.
In case of the sample document the underlines are drawn as blue (RGB 0,0,1) filled vector graphics rectangles (long, slim ones). As blue only is used for the links, we can use that criterion to find the rectangles in question.
Here is a sample implementation using PDFBox 1.8.10:
void removeBlueRectangles(PDDocument document) throws IOException
{
List<?> pages = document.getDocumentCatalog().getAllPages();
for (int i = 0; i < pages.size(); i++)
{
PDPage page = (PDPage) pages.get(i);
PDStream contents = page.getContents();
PDFStreamParser parser = new PDFStreamParser(contents.getStream());
parser.parse();
List<Object> tokens = parser.getTokens();
Stack<Boolean> blueState = new Stack<Boolean>();
blueState.push(false);
for (int j = 0; j < tokens.size(); j++)
{
Object next = tokens.get(j);
if (next instanceof PDFOperator)
{
PDFOperator op = (PDFOperator) next;
if (op.getOperation().equals("q"))
{
blueState.push(blueState.peek());
}
else if (op.getOperation().equals("Q"))
{
blueState.pop();
}
else if (op.getOperation().equals("rg"))
{
if (j > 2)
{
Object r = tokens.get(j-3);
Object g = tokens.get(j-2);
Object b = tokens.get(j-1);
if (r instanceof COSNumber && g instanceof COSNumber && b instanceof COSNumber)
{
blueState.pop();
blueState.push((
Math.abs(((COSNumber)r).floatValue() - 0) < 0.001 &&
Math.abs(((COSNumber)g).floatValue() - 0) < 0.001 &&
Math.abs(((COSNumber)b).floatValue() - 1) < 0.001));
}
}
}
else if (op.getOperation().equals("f"))
{
if (blueState.peek() && j > 0)
{
Object re = tokens.get(j-1);
if (re instanceof PDFOperator && ((PDFOperator)re).getOperation().equals("re"))
{
tokens.set(j, PDFOperator.getOperator("n"));
}
}
}
}
}
PDStream updatedStream = new PDStream(document);
OutputStream out = updatedStream.createOutputStream();
ContentStreamWriter tokenWriter = new ContentStreamWriter(out);
tokenWriter.writeTokens(tokens);
out.close();
page.setContents(updatedStream);
}
}
(RemoveUnderlines.java)
original.pdf
Applying this to your first sample file original.pdf
public void testOriginal() throws IOException, COSVisitorException
{
try ( InputStream resourceStream = getClass().getResourceAsStream("original.pdf") )
{
PDDocument document = PDDocument.loadNonSeq(resourceStream, null);
removeBlueRectangles(document);
document.save("original-noBlueRectangles.pdf");
document.close();
}
}
(RemoveUnderlines.java)
results in a copy of the document with the blue underlines removed.
1178.pdf
You commented
After testing this on many files I have to say this solution works incorrectly in some cases. For example in for this file (dropbox.com/s/23g54bvt781lb93/1178.pdf?dl=0) it removes the entire content of the page. Keep searching..
So I applied the code to your new sample file 1178.pdf:
public void test1178() throws IOException, COSVisitorException
{
try ( InputStream resourceStream = getClass().getResourceAsStream("1178.pdf") )
{
PDDocument document = PDDocument.loadNonSeq(resourceStream, null);
removeBlueRectangles(document);
document.save(new File(RESULT_FOLDER, "1178-noBlueRectangles.pdf"));
document.close();
}
}
(RemoveUnderlines.java)
which resulted in a page with all of its content intact.
So I cannot confirm your claim that the solution works incorrectly; in particular I see that it does not remove the entire content of the page.
As I cannot reproduce your observation, I assume there are additional issues in your setup you have not yet mentioned.

Saved joints from kinect skeleton track

I work with the Kinect. My goal is to store the values the Kinect gives me for the location of the body (head, hands, etc.). I have written some code, but I cannot understand which values I should save, or how. I want to store the positions of the head, hands, and feet in a DB or in a txt file, so that from the data I can understand the movements of the person standing in front of the Kinect. For example, if someone moves their hand, the Kinect will send a value; I must store it and work out which movement was made. Sorry for the sparse info.
Here is my code:
using System;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Shapes;
using Microsoft.Kinect;
using System.Linq;
using System.IO;
namespace KinectSkeletonApplication1
{
public partial class MainWindow : Window
{
//Instantiate the Kinect runtime. Required to initialize the device.
//IMPORTANT NOTE: You can pass the device ID here, in case more than one Kinect device is connected.
KinectSensor sensor = KinectSensor.KinectSensors[0];
byte[] pixelData;
Skeleton[] skeletons;
public MainWindow()
{
InitializeComponent();
///////////////////////////////////
///////////////////////////////
//Runtime initialization is handled when the window is opened. When the window
//is closed, the runtime MUST be uninitialized.
this.Loaded += new RoutedEventHandler(MainWindow_Loaded);
this.Unloaded += new RoutedEventHandler(MainWindow_Unloaded);
sensor.ColorStream.Enable();
sensor.SkeletonStream.Enable();
}
void runtime_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
bool receivedData = false;
using (SkeletonFrame SFrame = e.OpenSkeletonFrame())
{
if (SFrame == null)
{
// The image processing took too long. More than 2 frames behind.
}
else
{
skeletons = new Skeleton[SFrame.SkeletonArrayLength];
SFrame.CopySkeletonDataTo(skeletons);
receivedData = true;
}
}
if (receivedData)
{
Skeleton currentSkeleton = (from s in skeletons
where s.TrackingState == SkeletonTrackingState.Tracked
select s).FirstOrDefault();
if (currentSkeleton != null)
{
SetEllipsePosition(head, currentSkeleton.Joints[JointType.Head]);
SetEllipsePosition(leftHand, currentSkeleton.Joints[JointType.HandLeft]);
SetEllipsePosition(rightHand, currentSkeleton.Joints[JointType.HandRight]);
SetEllipsePosition(shoulder_center, currentSkeleton.Joints[JointType.ShoulderCenter]);
}
}
}
//This method is used to position the ellipses on the canvas
//according to correct movements of the tracked joints.
//IMPORTANT NOTE: Code for vector scaling was imported from the Coding4Fun Kinect Toolkit
//available here: http://c4fkinect.codeplex.com/
//I only used this part to avoid adding an extra reference.
private void SetEllipsePosition(Ellipse ellipse, Joint joint)
{
Microsoft.Kinect.SkeletonPoint vector = new Microsoft.Kinect.SkeletonPoint();
vector.X = ScaleVector(640, joint.Position.X);
vector.Y = ScaleVector(480, -joint.Position.Y);
vector.Z = joint.Position.Z;
Joint updatedJoint = new Joint();
updatedJoint = joint;
updatedJoint.TrackingState = JointTrackingState.Tracked;
updatedJoint.Position = vector;
Canvas.SetLeft(ellipse, updatedJoint.Position.X);
Canvas.SetTop(ellipse, updatedJoint.Position.Y);
}
private float ScaleVector(int length, float position)
{
float value = (((((float)length) / 1f) / 2f) * position) + (length / 2);
if (value > length)
{
return (float)length;
}
if (value < 0f)
{
return 0f;
}
string r = Convert.ToString(value);
string path = @"C:\Test\MyTest.txt";
// This text is added only once to the file.
if (!File.Exists(path))
{
// Create a file to write to.
string createText = "Hello and Welcome" + Environment.NewLine;
File.WriteAllText(path, createText);
}
// This text is always added, making the file longer over time
// if it is not deleted.
//string appendText = "This is extra text" + Environment.NewLine;
File.AppendAllText(path, r);
// Open the file to read from.
string readText = File.ReadAllText(path);
return value;
}
void MainWindow_Unloaded(object sender, RoutedEventArgs e)
{
sensor.Stop();
}
void MainWindow_Loaded(object sender, RoutedEventArgs e)
{
sensor.SkeletonFrameReady += runtime_SkeletonFrameReady;
sensor.ColorFrameReady += runtime_VideoFrameReady;
sensor.Start();
}
void runtime_VideoFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
bool receivedData = false;
using (ColorImageFrame CFrame = e.OpenColorImageFrame())
{
if (CFrame == null)
{
// The image processing took too long. More than 2 frames behind.
}
else
{
pixelData = new byte[CFrame.PixelDataLength];
CFrame.CopyPixelDataTo(pixelData);
receivedData = true;
}
}
if (receivedData)
{
BitmapSource source = BitmapSource.Create(640, 480, 96, 96,
PixelFormats.Bgr32, null, pixelData, 640 * 4);
videoImage.Source = source;
}
}
}
}
I think what you are looking for is getting the X, Y, Z coordinates of certain joints?
In that case, add this code:
Vector3D ShoulderCenter = new Vector3D(skeleton.Joints[JointType.ShoulderCenter].Position.X, skeleton.Joints[JointType.ShoulderCenter].Position.Y, skeleton.Joints[JointType.ShoulderCenter].Position.Z);
Vector3D RightShoulder = new Vector3D(skeleton.Joints[JointType.ShoulderRight].Position.X, skeleton.Joints[JointType.ShoulderRight].Position.Y, skeleton.Joints[JointType.ShoulderRight].Position.Z);
Vector3D LeftShoulder = new Vector3D(skeleton.Joints[JointType.ShoulderLeft].Position.X, skeleton.Joints[JointType.ShoulderLeft].Position.Y, skeleton.Joints[JointType.ShoulderLeft].Position.Z);
Vector3D RightElbow = new Vector3D(skeleton.Joints[JointType.ElbowRight].Position.X, skeleton.Joints[JointType.ElbowRight].Position.Y, skeleton.Joints[JointType.ElbowRight].Position.Z);
Vector3D LeftElbow = new Vector3D(skeleton.Joints[JointType.ElbowLeft].Position.X, skeleton.Joints[JointType.ElbowLeft].Position.Y, skeleton.Joints[JointType.ElbowLeft].Position.Z);
Vector3D RightWrist = new Vector3D(skeleton.Joints[JointType.WristRight].Position.X, skeleton.Joints[JointType.WristRight].Position.Y, skeleton.Joints[JointType.WristRight].Position.Z);
Vector3D LeftWrist = new Vector3D(skeleton.Joints[JointType.WristLeft].Position.X, skeleton.Joints[JointType.WristLeft].Position.Y, skeleton.Joints[JointType.WristLeft].Position.Z);
As you can see, this only defines the vectors for the upper body. Just add the missing joints the same way.
You need the following namespaces:
using System.Windows.Media;
using Microsoft.Kinect.Toolkit.Fusion;
using System.Windows.Media.Media3D;

getUserPixels - alternative in official Kinect SDK

Is there an alternative for the getUserPixels method offered by OpenNI in the official Kinect SDK?
How would one implement this functionality with the official Kinect SDK?
The official Kinect for Windows SDK (v1.6) does not support a direct call, such as getUserPixels, to extract a player silhouette but does contain all the information necessary to do so.
You can see this in action, in different ways, by examining two of the examples available from the Kinect for Windows Developer Toolkit.
Basic Interactions-WPF: includes a function to create a simple silhouette of the user being tracked.
Green Screen (-WPF, or -D2D): shows how to perform background subtraction to produce a green screen effect. In this example the data from the RGB camera is superimposed over a background image.
The two examples do this in different ways.
Basic Interactions pulls a BitmapMask out of the depth data which corresponds to the requested player. This has the advantage of only showing tracked users; any object not thought to be a skeleton is ignored.
Green Screen does not look for a particular user, instead keying on motion. This has the advantage of silhouetting any moving object -- such as a ball being passed between two users.
I believe the "Basic Interactions" example will show you how you implement what you are looking for. You'll have to do the work yourself, but it is possible. For example, using the "Basic Interactions" example as a base I created a UserControl that generates a simple silhouette of the user being tracked...
When the skeleton frame is ready, I pull out the player index:
private void OnSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
{
if (skeletonFrame != null && skeletonFrame.SkeletonArrayLength > 0)
{
if (_skeletons == null || _skeletons.Length != skeletonFrame.SkeletonArrayLength)
{
_skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
}
skeletonFrame.CopySkeletonDataTo(_skeletons);
// grab the tracked skeleton and set the playerIndex for use pulling
// the depth data out for the silhouette.
// NOTE: this assumes only a single tracked skeleton!
this.playerIndex = -1;
for (int i = 0; i < _skeletons.Length; i++)
{
if (_skeletons[i].TrackingState != SkeletonTrackingState.NotTracked)
{
this.playerIndex = i+1;
}
}
}
}
}
Then, when the next depth frame is ready, I pull out BitmapMask for the user that corresponds to playerIndex.
private void OnDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame != null)
{
// check if the format has changed.
bool haveNewFormat = this.lastImageFormat != depthFrame.Format;
if (haveNewFormat)
{
this.pixelData = new short[depthFrame.PixelDataLength];
this.depthFrame32 = new byte[depthFrame.Width * depthFrame.Height * Bgra32BytesPerPixel];
this.convertedDepthBits = new byte[this.depthFrame32.Length];
}
depthFrame.CopyPixelDataTo(this.pixelData);
for (int i16 = 0, i32 = 0; i16 < pixelData.Length && i32 < depthFrame32.Length; i16++, i32 += 4)
{
int player = pixelData[i16] & DepthImageFrame.PlayerIndexBitmask;
if (player == this.playerIndex)
{
convertedDepthBits[i32 + RedIndex] = 0x44;
convertedDepthBits[i32 + GreenIndex] = 0x23;
convertedDepthBits[i32 + BlueIndex] = 0x59;
convertedDepthBits[i32 + 3] = 0x66;
}
else if (player > 0)
{
convertedDepthBits[i32 + RedIndex] = 0xBC;
convertedDepthBits[i32 + GreenIndex] = 0xBE;
convertedDepthBits[i32 + BlueIndex] = 0xC0;
convertedDepthBits[i32 + 3] = 0x66;
}
else
{
convertedDepthBits[i32 + RedIndex] = 0x0;
convertedDepthBits[i32 + GreenIndex] = 0x0;
convertedDepthBits[i32 + BlueIndex] = 0x0;
convertedDepthBits[i32 + 3] = 0x0;
}
}
if (silhouette == null || haveNewFormat)
{
silhouette = new WriteableBitmap(
depthFrame.Width,
depthFrame.Height,
96,
96,
PixelFormats.Bgra32,
null);
SilhouetteImage.Source = silhouette;
}
silhouette.WritePixels(
new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
convertedDepthBits,
depthFrame.Width * Bgra32BytesPerPixel,
0);
Silhouette = silhouette;
this.lastImageFormat = depthFrame.Format;
}
}
}
What I end up with is a purple silhouette of the user in a WriteableBitmap, which can be copied to an Image on the control or pulled out and used elsewhere. Once you have the BitmapMask you could also map the data to the color stream if you wanted to actually see the RGB data that corresponds to that area.
You can adapt the code to simulate more closely the getUserPixels function if you like. The big part you'd be interested in would be, given a depth frame and a playerIndex:
if (depthFrame != null)
{
// check if the format has changed.
bool haveNewFormat = this.lastImageFormat != depthFrame.Format;
if (haveNewFormat)
{
this.pixelData = new short[depthFrame.PixelDataLength];
this.depthFrame32 = new byte[depthFrame.Width * depthFrame.Height * Bgra32BytesPerPixel];
this.convertedDepthBits = new byte[this.depthFrame32.Length];
}
depthFrame.CopyPixelDataTo(this.pixelData);
for (int i16 = 0, i32 = 0; i16 < pixelData.Length && i32 < depthFrame32.Length; i16++, i32 += 4)
{
int player = pixelData[i16] & DepthImageFrame.PlayerIndexBitmask;
if (player == this.playerIndex)
{
// this pixel "belongs" to the user identified in "playerIndex"
}
else
{
// not the requested user
}
}
}