Castor marshaller: suppress xsi:type while marshalling

I read a post written by you about:
Marshaller marshaller = new Marshaller(w);
marshaller.setSuppressXSIType(true);
The problem is that I'm using that method, but the result didn't change.
My code is :
Marshaller m = new Marshaller();
m.setSuppressXSIType(true);
m.setSuppressNamespaces(true);
m.setSupressXMLDeclaration(true);
m.setMarshalExtendedType(false);
m.marshal(obj, file);
But what I get is still the xmlns:xsi=.. and the xsi:type=.. inside the XML tags.
Am I doing something wrong? I'm using Castor XML 1.3.2.

If you create the Marshaller with the StringWriter, then the problem goes away.
StringWriter st = new StringWriter();
Marshaller marshaller = new Marshaller(st);
However, if you do the following, it won't work:
Marshaller marshaller = new Marshaller();
marshaller.setValidation(true);
marshaller.setSuppressXSIType(true);
marshaller.setSuppressNamespaces(true);
marshaller.setSupressXMLDeclaration(true);
marshaller.setMapping(mapping);
marshaller.marshal(order,st);
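If I remember correctly, the reason is that some of the marshal(...) overloads on Marshaller are static convenience methods that build their own default marshaller internally, so setters called on your instance are silently ignored; constructing the Marshaller with the destination Writer and then calling marshal(obj) avoids that path. Treat this as a hypothesis, though; I haven't checked it against the 1.3.2 sources.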

That's what I've done as well and it worked for me. Here is the example, hope it helps:
MarshallerTest.java:
import org.exolab.castor.mapping.Mapping;
import org.exolab.castor.mapping.MappingException;
import org.exolab.castor.xml.MarshalException;
import org.exolab.castor.xml.Marshaller;
import org.exolab.castor.xml.ValidationException;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Arrays;
public class MarshallerTest {
public static void main(String[] args) throws IOException, MappingException, MarshalException, ValidationException {
Mapping mapping = new Mapping();
mapping.loadMapping(MarshallerTest.class.getResource("/mapping.xml"));
StringWriter sw = new StringWriter();
Marshaller marshaller = new Marshaller(sw);
marshaller.setMapping(mapping);
marshaller.setSuppressNamespaces(true);
marshaller.setSuppressXSIType(true);
Person alex = new Person();
alex.setName("alex");
alex.setHobbies(Arrays.asList(new String[]{"fishing", "hiking"}));
marshaller.marshal(alex);
System.out.println(sw.toString());
}
}
Person.java:
import java.util.List;

public class Person {
private String name;
private List<String> hobbies;
// ...getters and setters
}
castor.properties:
org.exolab.castor.indent=true
Output:
<?xml version="1.0" encoding="UTF-8"?>
<person>
<hobbies>fishing</hobbies>
<hobbies>hiking</hobbies>
<name>alex</name>
</person>
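If you need the output in a file rather than a StringWriter, the suppression settings should still be honored as long as you construct the Marshaller with a Writer yourself. A minimal sketch, assuming Castor 1.3.2 and reusing the mapping and the alex object from MarshallerTest ("person.xml" is a hypothetical target file):
import java.io.FileWriter;
import java.io.Writer;
import org.exolab.castor.xml.Marshaller;

// Hand the Marshaller a Writer at construction time instead of calling
// marshal(obj, file) on an instance created with the no-arg constructor.
Writer out = new FileWriter("person.xml");
Marshaller marshaller = new Marshaller(out);
marshaller.setMapping(mapping);          // same mapping as in MarshallerTest
marshaller.setSuppressNamespaces(true);
marshaller.setSuppressXSIType(true);
marshaller.marshal(alex);
out.close();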

Related

How do I get rid of deprecated code in this class file for Java ANTLR?

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.tree.ParseTree;
import java.io.FileInputStream;
import java.io.InputStream;
public class Calc {
public static void main(String[] args) throws Exception {
String inputFile = null;
if ( args.length>0 ) inputFile = args[0];
InputStream is = System.in;
if ( inputFile!=null ) is = new FileInputStream(inputFile);
ANTLRInputStream input = new ANTLRInputStream(is);
CalculatorLexer lexer = new CalculatorLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
CalculatorParser parser = new CalculatorParser(tokens);
ParseTree tree = parser.program(); // parse
CalcVisitor calcc = new CalcVisitor();
calcc.visit(tree);
}
}
As far as I know, I am pretty sure ANTLRFileStream is what is deprecated, but I have tried replacing it with CharStreams, and the code I try to run keeps resulting in an error. How can I fix this?
Try something like this:
public static void main(String[] args) throws Exception {
CharStream charStream = args.length > 0
? CharStreams.fromFileName(args[0])
: CharStreams.fromStream(System.in);
CalculatorLexer lexer = new CalculatorLexer(charStream);
CommonTokenStream tokens = new CommonTokenStream(lexer);
CalculatorParser parser = new CalculatorParser(tokens);
ParseTree tree = parser.program(); // parse
CalcVisitor calcc = new CalcVisitor();
calcc.visit(tree);
}
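Note that the CharStreams factory class (and the deprecation of ANTLRInputStream/ANTLRFileStream) was introduced in ANTLR 4.7, so the snippet above only compiles against a 4.7+ runtime jar; an older runtime on the classpath may be the source of the error you're seeing.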

Exception occurs while performing an update operation using Java code in BigQuery

I am using the jobs.query API to update BigQuery table records using Java.
While running this as part of longer stability tests, we get an exception. The exception occurs after roughly 5 hours, or sometimes 11 hours...
The problem is that the exception carries a blank error message, a blank cause, and a blank stack trace, so I am not able to find the actual cause of the error.
Any guidance would be appreciated.
Thanks in advance!
package app;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.ParseException;
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.bigquery.Bigquery;
import com.google.api.services.bigquery.BigqueryScopes;
import com.google.api.services.bigquery.model.QueryRequest;
import com.google.api.services.bigquery.model.QueryResponse;
public class WhereCLause {
//Create Entry Point: Main function
public static void main(String[] args) throws IOException, ParseException {
String projectId = "projectId";
// Create a new Bigquery client authorized via Application Credentials.
Bigquery bigquery = createAuthorizedClient();
String query = "UPDATE datasetId.tableName SET Name = 'RefUpdate' WHERE ID = 001";
System.out.println(query);
executeQuery(query, bigquery, projectId);
}
public static Bigquery createAuthorizedClient() throws IOException {
String jsonFilePath="file.json";
// Create the credential
HttpTransport transport = new NetHttpTransport();
JsonFactory jsonFactory = new JacksonFactory();
InputStream credentialStream = new FileInputStream(new File(jsonFilePath));
GoogleCredential credential = GoogleCredential.fromStream(credentialStream, transport, jsonFactory);
if (credential.createScopedRequired()) {
credential = credential.createScoped(BigqueryScopes.all());
}
return new Bigquery.Builder(transport, jsonFactory, credential).setApplicationName("BigqueryApplication").build();
}
private static void executeQuery(String querySql, Bigquery bigquery, String projectId) throws IOException {
QueryResponse query = bigquery.jobs().query(projectId, new QueryRequest().setQuery(querySql).setUseLegacySql(true)).execute();
System.out.println(query.getNumDmlAffectedRows());
System.err.println(query.getErrors());
}
}
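When the client does throw, the plain message is often empty while the structured error payload still carries detail. Here is a sketch of how the query method could surface that, assuming the same google-api-client stack as above (executeQueryVerbose is a hypothetical variant of executeQuery; GoogleJsonResponseException is the exception type this client throws for non-2xx responses):
import com.google.api.client.googleapis.json.GoogleJsonResponseException;

private static void executeQueryVerbose(String querySql, Bigquery bigquery, String projectId) throws IOException {
    try {
        QueryResponse response = bigquery.jobs().query(projectId,
                new QueryRequest().setQuery(querySql)
                        // DML statements such as UPDATE require standard SQL,
                        // so legacy SQL must be disabled for them.
                        .setUseLegacySql(false)).execute();
        System.out.println(response.getNumDmlAffectedRows());
        System.err.println(response.getErrors());
    } catch (GoogleJsonResponseException e) {
        // The HTTP status and the parsed JSON error details usually carry
        // far more information than e.getMessage() alone.
        System.err.println("HTTP status: " + e.getStatusCode());
        if (e.getDetails() != null) {
            System.err.println(e.getDetails().toPrettyString());
        }
        throw e;
    }
}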

Query expansion in Lucene

I am new to Lucene and I am trying to do query expansion.
I have referred to these two posts (first, second) and I've managed to rework the code to suit version 6.0.0, since the code in those posts is deprecated.
The issue is that either I'm not getting any results or I'm not accessing the results (the expanded queries) correctly.
Here is my code:
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.text.ParseException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.synonym.SynonymFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.*;
public class Graph extends Analyzer
{
protected static TokenStreamComponents createComponents(String fieldName, Reader reader) throws ParseException{
System.out.println("1");
// TODO Auto-generated method stub
Tokenizer source = new ClassicTokenizer();
source.setReader(reader);
TokenStream filter = new StandardFilter( source);
filter = new LowerCaseFilter(filter);
SynonymMap mySynonymMap = null;
try {
mySynonymMap = buildSynonym();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
filter = new SynonymFilter(filter, mySynonymMap, false);
return new TokenStreamComponents(source, filter);
}
private static SynonymMap buildSynonym() throws IOException, ParseException
{ System.out.print("build");
File file = new File("wn\\wn_s.pl");
InputStream stream = new FileInputStream(file);
Reader rulesReader = new InputStreamReader(stream);
SynonymMap.Builder parser = null;
parser = new WordnetSynonymParser(true, true, new StandardAnalyzer(CharArraySet.EMPTY_SET));
System.out.print(parser.toString());
((WordnetSynonymParser) parser).parse(rulesReader);
SynonymMap synonymMap = parser.build();
return synonymMap;
}
public static void main (String[] args) throws UnsupportedEncodingException, IOException, ParseException
{
Reader reader = new FileReader("C:\\input.txt"); // here I have the queries that I want to expand
TokenStreamComponents TSC = createComponents( "" , new StringReader("some text goes here"));
System.out.print(TSC); // How to get the result from TSC????
}
@Override
protected TokenStreamComponents createComponents(String string)
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
}
Please suggest ways to help me access the expanded queries!
So, are you just trying to figure out how to iterate through the terms from the TokenStreamComponents in your main method?
Something like this:
TokenStreamComponents TSC = createComponents( "" , new StringReader("some text goes here"));
TokenStream stream = TSC.getTokenStream();
// CharTermAttribute lives in org.apache.lucene.analysis.tokenattributes
CharTermAttribute termattr = stream.addAttribute(CharTermAttribute.class);
stream.reset();
while (stream.incrementToken()) {
System.out.println(termattr.toString());
}
stream.end();
stream.close();
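With the WordNet map loaded, this loop should print every token the chain emits, one per line, including the injected synonyms; SynonymFilter adds synonyms at the same position as the original term (position increment 0), so each expansion appears as an extra line next to its source token.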

Apache FOP the method newInstance(FopFactoryConfig) in the type FopFactory is not applicable for the arguments ()

I am getting the following error:
Exception in thread "main" java.lang.Error: Unresolved compilation problem: The method newInstance(FopFactoryConfig) in the type FopFactory is not applicable for the arguments ()
at fopdemo.fopvass.PDFHandler.createPDFFile(PDFHandler.java:42)
at fopdemo.fopvass.TestPDF.main(TestPDF.java:40)
This is my code:
package fopdemo.fopvass;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URL;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.TransformerFactoryConfigurationError;
import javax.xml.transform.dom.DOMResult;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.sax.SAXResult;
import javax.xml.transform.stream.StreamSource;
import org.apache.fop.apps.FOPException;
import org.apache.fop.apps.FOUserAgent;
import org.apache.fop.apps.Fop;
import org.apache.fop.apps.FopFactory;
import org.apache.fop.apps.MimeConstants;
public class PDFHandler {
public static final String EXTENSION = ".pdf";
public String PRESCRIPTION_URL = "template.xsl";
public String createPDFFile(ByteArrayOutputStream xmlSource, String templateFilePath) throws IOException {
File file = File.createTempFile("" + System.currentTimeMillis(), EXTENSION);
URL url = new File(templateFilePath + PRESCRIPTION_URL).toURI().toURL();
// creation of transform source
StreamSource transformSource = new StreamSource(url.openStream());
// create an instance of fop factory
FopFactory fopFactory = FopFactory.newInstance();
// a user agent is needed for transformation
FOUserAgent foUserAgent = fopFactory.newFOUserAgent();
// to store output
ByteArrayOutputStream pdfoutStream = new ByteArrayOutputStream();
StreamSource source = new StreamSource(new ByteArrayInputStream(xmlSource.toByteArray()));
Transformer xslfoTransformer;
try {
TransformerFactory transfact = TransformerFactory.newInstance();
xslfoTransformer = transfact.newTransformer(transformSource);
// Construct fop with desired output format
Fop fop;
try {
fop = fopFactory.newFop(MimeConstants.MIME_PDF, foUserAgent, pdfoutStream);
// Resulting SAX events (the generated FO)
// must be piped through to FOP
Result res = new SAXResult(fop.getDefaultHandler());
// Start XSLT transformation and FOP processing
try {
// everything will happen here..
xslfoTransformer.transform(source, res);
// if you want to save PDF file use the following code
OutputStream out = new java.io.FileOutputStream(file);
out = new java.io.BufferedOutputStream(out);
FileOutputStream str = new FileOutputStream(file);
str.write(pdfoutStream.toByteArray());
str.close();
out.close();
} catch (TransformerException e) {
e.printStackTrace();
}
} catch (FOPException e) {
e.printStackTrace();
}
} catch (TransformerConfigurationException e) {
e.printStackTrace();
} catch (TransformerFactoryConfigurationError e) {
e.printStackTrace();
}
return file.getPath();
}
public ByteArrayOutputStream getXMLSource(EmployeeData data) throws Exception {
JAXBContext context;
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
try {
context = JAXBContext.newInstance(EmployeeData.class);
Marshaller m = context.createMarshaller();
m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
m.marshal(data, System.out);
m.marshal(data, outStream);
} catch (JAXBException e) {
e.printStackTrace();
}
return outStream;
}
}
This is where it is called:
package fopdemo.fopvass;
import java.io.ByteArrayOutputStream;
import java.util.ArrayList;
/**
* @author Debasmita.Sahoo
*
*/
public class TestPDF {
public static void main(String args[]){
System.out.println("Hi Testing");
ArrayList employeeList = new ArrayList();
String templateFilePath ="C:/Paula/Proyectos/fop/fopvass/resources/";
Employee e1= new Employee();
e1.setName("Debasmita1 Sahoo");
e1.setEmployeeId("10001");
e1.setAddress("Pune");
employeeList.add(e1);
Employee e2= new Employee();
e2.setName("Debasmita2 Sahoo");
e2.setEmployeeId("10002");
e2.setAddress("Test");
employeeList.add(e2);
Employee e3= new Employee();
e3.setName("Debasmita3 Sahoo");
e3.setEmployeeId("10003");
e3.setAddress("Mumbai");
employeeList.add(e3);
EmployeeData data = new EmployeeData();
data.setEemployeeList(employeeList);
PDFHandler handler = new PDFHandler();
try {
ByteArrayOutputStream streamSource = handler.getXMLSource(data);
handler.createPDFFile(streamSource,templateFilePath);
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
The following line won't compile:
FopFactory fopFactory = FopFactory.newInstance();
The method newInstance() requires a parameter. From the documentation (provided by Mathias Muller), under 'Basic Usage', that parameter refers to a configuration file:
FopFactory fopFactory = FopFactory.newInstance(
new File( "C:/Temp/fop.xconf" ) );
You need to provide it with a File object. Alternatively, it is also possible to create a FopFactory instance by providing a URI to resolve relative URIs in the input file (because SVG files reference other SVG files). As the documentation suggests:
FopFactory fopFactory = FopFactory.newInstance(
new File(".").toURI() );
The user agent is the entity that allows you to interact with a single rendering run, i.e. the processing of a single document. If you wish to customize the user agent's behaviour, the first step is to create your own instance of FOUserAgent using the appropriate factory method on FopFactory and pass that to the factory method that will create a new Fop instance.
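For example (a sketch, not from the original post; setProducer is just one of several setters FOUserAgent exposes, and pdfoutStream is the output stream from the question's code):
FopFactory fopFactory = FopFactory.newInstance(new File(".").toURI());
FOUserAgent userAgent = fopFactory.newFOUserAgent();
// Customize this rendering run before creating the Fop instance.
userAgent.setProducer("fopvass demo");
Fop fop = fopFactory.newFop(MimeConstants.MIME_PDF, userAgent, pdfoutStream);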
FopFactory.newInstance supports the following overloads:
FopFactory.newInstance(File)
FopFactory.newInstance(URI)
FopFactory.newInstance(URI, InputStream)
FopFactory.newInstance(FopFactoryConfig)
I successfully ran the following code on Windows and on Mac OS (i.e. a Unix/Linux-like NFS):
URI uri = new File(config.getRoot()).toPath().toUri();
FopFactory fopFactory = FopFactory.newInstance(uri);
On the other hand, this does not work (and I don't understand why):
URI uri = new File(config.getRoot()).toUri();
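One possible explanation (unverified): File.toURI() and Path.toUri() can render the same directory differently, e.g. file:/C:/... versus file:///C:/..., and only append a trailing slash when the directory exists at the time of the call, so FOP's base-URI resolution may accept one form and not the other.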

Lucene Highlighter with stemming analyzer

I am using Lucene's Highlighter class to highlight fragments of matched search results and it works well. I would like to switch from searching with the StandardAnalyzer to the EnglishAnalyzer, which will perform stemming of terms.
The search results are good, but now the highlighter doesn't always find a match. Here's an example of what I'm looking at:
document field text 1: Everyone likes goats.
document field text 2: I have a goat that eats everything.
Using the EnglishAnalyzer and searching for "goat", both documents are matched, but the highlighter is only able to find a matched fragment from document 2. Is there a way to have the highlighter return data for both documents?
I understand that the stemmed tokens differ from the original text, but the original tokens are still there, so it seems reasonable for the highlighter to just highlight whatever token is present at that location.
If it helps, this is using Lucene 3.5.
I found a solution to this problem: I changed from using the Highlighter class to the FastVectorHighlighter. It looks like I'll pick up some speed improvements too (at the expense of storing term vector data). For the benefit of anyone coming across this question later, here's a unit test showing how this all works together:
package com.sample.index;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.vectorhighlight.*;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static junit.framework.Assert.assertEquals;
public class TestIndexStuff {
public static final String FIELD_NORMAL = "normal";
public static final String[] PRE_TAGS = new String[]{"["};
public static final String[] POST_TAGS = new String[]{"]"};
private IndexSearcher searcher;
private Analyzer analyzer = new EnglishAnalyzer(Version.LUCENE_35);
@Before
public void init() throws IOException {
RAMDirectory idx = new RAMDirectory();
IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_35, analyzer);
IndexWriter writer = new IndexWriter(idx, config);
addDocs(writer);
writer.close();
searcher = new IndexSearcher(IndexReader.open(idx));
}
private void addDocs(IndexWriter writer) throws IOException {
for (String text : new String[] {
"Pretty much everyone likes goats.",
"I have a goat that eats everything.",
"goats goats goats goats goats"}) {
Document doc = new Document();
doc.add(new Field(FIELD_NORMAL, text, Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
}
private FastVectorHighlighter makeHighlighter() {
FragListBuilder fragListBuilder = new SimpleFragListBuilder(200);
FragmentsBuilder fragmentBuilder = new SimpleFragmentsBuilder(PRE_TAGS, POST_TAGS);
return new FastVectorHighlighter(true, true, fragListBuilder, fragmentBuilder);
}
@Test
public void highlight() throws ParseException, IOException {
Query query = new QueryParser(Version.LUCENE_35, FIELD_NORMAL, analyzer)
.parse("goat");
FastVectorHighlighter highlighter = makeHighlighter();
FieldQuery fieldQuery = highlighter.getFieldQuery(query);
TopDocs topDocs = searcher.search(query, 10);
List<String> fragments = new ArrayList<String>();
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
fragments.add(highlighter.getBestFragment(fieldQuery, searcher.getIndexReader(),
scoreDoc.doc, FIELD_NORMAL, 10000));
}
assertEquals(3, fragments.size());
assertEquals("[goats] [goats] [goats] [goats] [goats]", fragments.get(0).trim());
assertEquals("Pretty much everyone likes [goats].", fragments.get(1).trim());
assertEquals("I have a [goat] that eats everything.", fragments.get(2).trim());
}
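One caveat: the FastVectorHighlighter only works on fields indexed with term vectors including positions and offsets (hence Field.TermVector.WITH_POSITIONS_OFFSETS in addDocs above); without them, getBestFragment simply returns null.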
}