using a sparql query as a building block in another query - sparql

I can write a SPARQL query to concatenate the words of a sentence, found in individual triples, into a single sentence:
SELECT ?sent (sample (?sf1) as ?sf11) (group_concat(?wf) as ?sf2)
WHERE
{ ?sent a nlp:Sentence .
{select *
{ ?w rdfs:partOf ?sent .
?w a nlp:Token .
?w nlp:wordForm ?wf .
} order by ?w
}
} group by ?sent
limit 20
but I cannot find a way to use this SELECT statement in another query, where I find the sentence ?sent and would like to embed this SELECT statement:
select *
where
{ ?tok a wn:Locomote.
?tok nlp:lemma3 ?lem .
?tok rdfs:partOf ?sent .
?sent a nlp:Sentence .
?werk ^rdfs:partOf ?sent .
{SELECT ?sent (group_concat(?wf) as ?sf2)
WHERE
{ ?sent a nlp:Sentence .
{select *
{ ?w rdfs:partOf ?sent .
?w a nlp:Token .
?w nlp:wordForm ?wf .
} order by ?w
}
} group by ?sent
}
}
The result is not the sentence found in the first part; it seems that ?sent in the nested query is not restricted by the outer query.
I do not see how to nest the queries properly.
Thank you for help!

Example of a custom aggregate in Jena (a group_concat that returns its values in sorted order):
package org.stackoverflow.jena.customaggregate;
import com.google.common.base.Joiner;
import org.apache.jena.atlas.logging.LogCtl;
import org.apache.jena.graph.Graph;
import org.apache.jena.query.*;
import org.apache.jena.rdf.model.ModelFactory;
import org.apache.jena.sparql.engine.binding.Binding;
import org.apache.jena.sparql.expr.Expr;
import org.apache.jena.sparql.expr.ExprEvalException;
import org.apache.jena.sparql.expr.ExprList;
import org.apache.jena.sparql.expr.NodeValue;
import org.apache.jena.sparql.expr.aggregate.Accumulator;
import org.apache.jena.sparql.expr.aggregate.AccumulatorFactory;
import org.apache.jena.sparql.expr.aggregate.AggCustom;
import org.apache.jena.sparql.expr.aggregate.AggregateRegistry;
import org.apache.jena.sparql.function.FunctionEnv;
import org.apache.jena.sparql.graph.NodeConst;
import org.apache.jena.sparql.sse.SSE;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class AggGroupConcatSorted {
static {
LogCtl.setCmdLogging();
}
static AccumulatorFactory accumulatorFactory = (agg, distinct) -> new AccGroupConcatSorted(agg);
static class AccGroupConcatSorted implements Accumulator {
private AggCustom agg;
private List<String> nodeStrList = new ArrayList<>();
AccGroupConcatSorted(AggCustom agg) {
this.agg = agg;
}
@Override
public void accumulate(Binding binding, FunctionEnv functionEnv) {
ExprList exprList = agg.getExprList();
for (Expr expr : exprList) {
try {
NodeValue nv = expr.eval(binding, functionEnv);
// Evaluation succeeded, add string to list
nodeStrList.add(nv.asString());
} catch (ExprEvalException ex) {
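// the expression did not evaluate for this binding (e.g. an unbound variable) - skip it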
}
}
}
@Override
public NodeValue getValue() {
// sort list
Collections.sort(nodeStrList);
// return single node which in fact is the concatenated string of the list elements
return NodeValue.makeString(Joiner.on(" ").join(nodeStrList));
}
}
public static void main(String[] args) {
// Example aggregate that concatenates the node values in sorted order
String aggUri = "http://example.org/group_concat_sorted" ;
// register the custom aggregate; NodeConst.nodeMinusOne is the value returned when there are no rows
AggregateRegistry.register(aggUri, accumulatorFactory, NodeConst.nodeMinusOne);
// sample data
Graph g = SSE.parseGraph("(graph (:s :p \"b\") (:s :p \"bc\") (:s :p \"abc\"))") ;
String qs = "SELECT (<http://example.org/group_concat_sorted>(?o) AS ?x) {?s ?p ?o}" ;
// query execution
Query q = QueryFactory.create(qs) ;
try ( QueryExecution qexec = QueryExecutionFactory.create(q, ModelFactory.createModelForGraph(g)) ) {
ResultSet rs = qexec.execSelect() ;
ResultSetFormatter.out(rs);
}
}
}

Related

Jena dbpedia how to use (STR(?foo) AS ?foo2)?

I want to avoid using substring to remove, for example, the "@en" suffix from my ontology:abstract or ontology:label values. It works with the SPARQL explorer, but when I use an HTTP query in Jena it doesn't give any result... I tried this query in an Android project with the Jena library (androjena):
SELECT ?type (STR(?l) AS ?label) {
?type a owl:Class;
rdfs:label ?l .
FILTER (LANG(?l) = "en")
}
Once I put (STR(?l) AS ?label) into my Jena HTTP query, it doesn't give any result anymore. Can someone help me?
Here is the part of the code that I am trying to change to avoid the use of substrings:
private String entityQuery(String entity, String keyWord, String language) {
return addPrefix("rdfs: <http://www.w3.org/2000/01/rdf-schema#>") +
addPrefix("ontology: <http://dbpedia.org/ontology/>") +
addQuery("SELECT ?name ?desc ?thumb WHERE {\n"
+"?author a ontology:" + entity + ";\n"
+"rdfs:label ?name;\n"
+"ontology:abstract ?desc.\n"
+"FILTER(<bif:contains>(?desc,\"'"+keyWord+"'\") && langMatches(lang(?desc), \""+language+"\") " +
"&& langMatches( lang(?name), \""+language+"\"))\n"
+"OPTIONAL { ?author ontology:thumbnail ?thumb }.\n"
+"}ORDER BY ?name\n");
}
private LinkedList<Entity> collectEntities(ResultSet results) {
LinkedList<Entity> temp = new LinkedList<>();
/* do stuff with the results */
while (results.hasNext()) {
Entity a = new Entity();
QuerySolution row = results.next();
if (row.getResource("thumb") != null)
a.setPictureURL(row.get("thumb").toString());
a.setTitle(row.get("name").toString().substring(0, row.get("name").toString().indexOf("@")));
a.setSummary(row.get("desc").toString().substring(0, row.get("desc").toString().indexOf("@")));
temp.add(a);
}
return temp;
}
private String addPrefix(String prefix) {
return "PREFIX " + prefix + "\n";
}
private String addQuery(String query) {
return query;
}
I use substring here:
a.setTitle(row.get("name").toString().substring(0, row.get("name").toString().indexOf("@")));
a.setSummary(row.get("desc").toString().substring(0, row.get("desc").toString().indexOf("@")));
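One way to avoid both STR() and the substring calls (a sketch of an alternative, not from the original thread) is to read the literal's lexical form on the Java side: QuerySolution.getLiteral(...).getLexicalForm() returns the text without the language tag, and getResource(...).getURI() gives the plain URI. With the Entity class and the ?name/?desc/?thumb variables from the code above, collectEntities could look roughly like this:
private LinkedList<Entity> collectEntities(ResultSet results) {
    LinkedList<Entity> temp = new LinkedList<>();
    while (results.hasNext()) {
        QuerySolution row = results.next();
        Entity a = new Entity();
        if (row.getResource("thumb") != null)
            a.setPictureURL(row.getResource("thumb").getURI());
        // getLexicalForm() returns the text without the "@en" language tag,
        // so no substring is needed and STR() can stay out of the query
        a.setTitle(row.getLiteral("name").getLexicalForm());
        a.setSummary(row.getLiteral("desc").getLexicalForm());
        temp.add(a);
    }
    return temp;
}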

How to perform delete query in jena sparql?

This question might sound lame but I am really confused... This is my code for a normal query:
public class test4query extends Object {
public static String[] arr=new String[30];
public void mai (String s) {
String directory = "EMAILADDRESS" ;
Dataset ds = TDBFactory.createDataset(directory) ;
ds.begin(ReadWrite.READ) ;
Model model = ds.getDefaultModel() ;
QueryExecution qExec = QueryExecutionFactory.create(s, ds) ;
int i=0;
try{
ResultSet rs = qExec.execSelect() ;
String x=rs.toString();
while (rs.hasNext()) {
QuerySolution qs = rs.next();
String rds;
if(qs.get("x")!=null) {
rds = qs.get("x").toString();
} else {
rds="hi";
}
if(rds==null) {
break;
}
System.out.println(rds);
arr[i] = rds;
i++;
}
} finally
{qExec.close() ;
ds.commit();
ds.end();
}
}
}
But this does not work for delete queries... It shows this error:
Was expecting one of:
"base" ...
"prefix" ...
"select" ...
"describe" ...
"construct" ...
"ask" ...
I know some changes need to be made for update queries. Can somebody give me a hint? Any link will be helpful!
SPARQL Query and SPARQL Update are different languages, and there are different factories for parsing them. QueryFactory is for the SPARQL 1.1 Query language; for SPARQL 1.1 Update, you need to use UpdateFactory.
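A minimal sketch of what that looks like (not from the original answer; shown with the org.apache.jena packages used elsewhere on this page, while older Jena 2.x releases keep the same classes under com.hp.hpl.jena; the DELETE string is just an example):
import org.apache.jena.query.Dataset;
import org.apache.jena.query.ReadWrite;
import org.apache.jena.tdb.TDBFactory;
import org.apache.jena.update.UpdateExecutionFactory;
import org.apache.jena.update.UpdateFactory;
import org.apache.jena.update.UpdateRequest;
public class UpdateExample {
    public static void main(String[] args) {
        Dataset ds = TDBFactory.createDataset("EMAILADDRESS");  // same TDB directory as in the question
        String updateString = "DELETE WHERE { ?s ?p ?o }";      // example update; any SPARQL Update string works
        ds.begin(ReadWrite.WRITE);                               // updates need a WRITE transaction
        try {
            UpdateRequest request = UpdateFactory.create(updateString); // UpdateFactory, not QueryFactory
            UpdateExecutionFactory.create(request, ds).execute();
            ds.commit();
        } finally {
            ds.end();
        }
    }
}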

errors in transaction in jena tdb?

I am trying to write properties into a model and then query it. This is part of my code:
String directory = "EMAILADDRESS" ;
//create the dataset for the tdb store
Dataset ds = TDBFactory.createDataset(directory) ;
//create default rdf model
ds.begin(ReadWrite.WRITE);
Model model = ds.getDefaultModel() ;
//write to the tdb dataset
When I write this and then query, the query shows no result... but when I interchange the order of getting the model and begin(), i.e.
Model model = ds.getDefaultModel() ;
//write to the tdb dataset
ds.begin(ReadWrite.WRITE);
then it works fine! But it sometimes gives this error:
com.hp.hpl.jena.tdb.transaction.TDBTransactionException: Not in a transaction
I know that the first way is correct, but I don't understand why it doesn't respond to queries... This is the code for querying:
public class test4query extends Object {
public static String[] arr=new String[30];
public void mai (String s) {
String directory = "EMAILADDRESS" ;
Dataset ds = TDBFactory.createDataset(directory) ;
ds.begin(ReadWrite.READ) ;
Model model = ds.getDefaultModel() ;
QueryExecution qExec = QueryExecutionFactory.create(s, ds) ;
int i=0;
try{
ResultSet rs = qExec.execSelect() ;
String x=rs.toString();
while (rs.hasNext()) {
QuerySolution qs = rs.next();
String rds;
if(qs.get("x")!=null) {
rds = qs.get("x").toString();
} else {
rds="hi";
}
if(rds==null) {
break;
}
System.out.println(rds);
arr[i] = rds;
i++;
}
} finally
{qExec.close() ;
ds.commit();
ds.end();
}
}
}
It is unclear when you get that exception. The code example is full of parts that are commented out and does not use "m" at all.
You cannot call ResultSetFormatter.out(rs) after you have called qExec.close or ds.commit.
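A minimal sketch of the pattern being described (not from the original answer, again using the org.apache.jena package names): obtain the model only after begin(), commit and end the write transaction, and then query inside a separate read transaction, consuming the results before the execution is closed:
import org.apache.jena.query.*;
import org.apache.jena.rdf.model.Model;
import org.apache.jena.tdb.TDBFactory;
public class TransactionExample {
    public static void main(String[] args) {
        String directory = "EMAILADDRESS";                 // same TDB directory as in the question
        Dataset ds = TDBFactory.createDataset(directory);
        // write inside a WRITE transaction; the model is obtained *after* begin()
        ds.begin(ReadWrite.WRITE);
        try {
            Model model = ds.getDefaultModel();
            model.add(model.createResource("http://example.org/s"),   // example triple
                      model.createProperty("http://example.org/p"),
                      "example value");
            ds.commit();
        } finally {
            ds.end();
        }
        // query inside a separate READ transaction; consume the ResultSet before close()/end()
        ds.begin(ReadWrite.READ);
        try (QueryExecution qExec = QueryExecutionFactory.create("SELECT * { ?s ?p ?o }", ds)) {
            ResultSetFormatter.out(qExec.execSelect());
        } finally {
            ds.end();
        }
    }
}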

Best Practices on querying for multiple properties of a resource in the same SPARQL query

On my database, I have triples like:
DocumentUri -> dc.title -> title
DocumentUri -> dc.language -> language
DocumentUri -> dc.description -> description
DocumentUri -> dc.creator -> AuthorUri
I'd like to be able to search for a document title and then get all the properties from all the documents matching the title search.
I'm trying to do that with Jena and SPARQL. I made a query that receives a title to get the URIs of the documents that have the given title. This is the method; it gets the URIs returned and stores them in a list called webDocumentListInicial:
public void searchUriByTitle() {
RDFNode documentUriNode;
String queryString = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> " +
"PREFIX dc: <http://purl.org/dc/elements/1.1/> SELECT ?document WHERE { " +
"?document dc:title ?title." +
"FILTER (?title = \"" + this.getTitle() + "\" ). }";
Query query = QueryFactory.create(queryString);
QueryExecution qe = QueryExecutionFactory.create(query, databaseModel);
ResultSet results = qe.execSelect();
while( results.hasNext() ) {
QuerySolution querySolution = results.next();
documentUriNode = querySolution.get("document");
WebDocument document = new WebDocument(documentUriNode.toString());
this.webDocumentListInicial.add(document);
}
qe.close();
}
To get the document's creator I made another query, because in this case the value from the triple is another resource. Here, I iterate over the list of document URIs that was filled in the method above.
public void searchAuthorByTitle() {
for( WebDocument doc : this.webDocumentListInicial ) {
RDFNode authorUriNode;
String queryString = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> " +
"PREFIX dc: <http://purl.org/dc/elements/1.1/> SELECT ?author WHERE { " +
"?document dc:creator ?author." +
"FILTER (?document = <" + doc.getUri() + "> ). }";
Query query = QueryFactory.create(queryString);
QueryExecution qe = QueryExecutionFactory.create(query, databaseModel);
ResultSet results = qe.execSelect();
while( results.hasNext() ) {
QuerySolution querySolution = results.next();
authorUriNode = querySolution.get("author");
WebAuthor author;
author = this.searchAuthorProperties(authorUriNode.toString(), new WebAuthor(authorUriNode.toString()) );
doc.addAuthor(author);
}
qe.close();
}
}
And to get the other document properties, I do it like in the example below, where I iterate over the list that was filled in the first method I showed above.
public void searchDescription() {
for( WebDocument doc : this.webDocumentListInicial ) {
String description = "";
Resource resource = ResourceFactory.createResource(doc.getUri());
StmtIterator descriptionStmtIt = databaseModel.listStatements(resource, DC.description,(RDFNode) null);
while( descriptionStmtIt.hasNext() ) {
description = descriptionStmtIt.next().getObject().toString();
}
doc.setDescription(description);
}
}
This way of handling the data isn't very productive because I need a different query for each property I get.
Is it possible to make only one query to get the document URI and all the other document's properties at once? I tried that once, like this:
String queryString = "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> " +
"PREFIX dc: <http://purl.org/dc/elements/1.1/> SELECT ?document ?description " +
"?language ?author WHERE { " +
"?document dc:title ?title." +
"?document dc.language ?language" +
"?document dc.description ?description" +
"?document dc.creator ?author" +
"FILTER (?title = \"" + this.getTitle() + "\" ). }";
But when I had more than one document matching the given title, it was difficult to know which properties returned belonged to each document.
Thank you!!
Building a better query
It sounds like you're doing a lot more work than you need to. If you have data like this:
@prefix : <http://stackoverflow.com/q/20436820/1281433/> .
:doc1 :title "Title1" ; :author :author1 ; :date "date-1" .
:doc2 :title "Title2" ; :author :author2 ; :date "date-2" .
:doc3 :title "Title3" ; :author :author3 ; :date "date-3" .
:doc4 :title "Title4" ; :author :author4 ; :date "date-4" .
:doc5 :title "Title5" ; :author :author5 ; :date "date-5" .
And given a list of titles, say "Title1", "Title4", and "Title5", if you want to retrieve the resource of the document with each title, along with the associated author and date, you can use a query like this:
prefix : <http://stackoverflow.com/q/20436820/1281433/>
select ?document ?author ?date where {
values ?title { "Title1" "Title4" "Title5" }
?document :title ?title ;
:author ?author ;
:date ?date .
}
You'll get results like this in one ResultSet. There's no need to make multiple queries.
----------------------------------
| document | author | date |
==================================
| :doc1 | :author1 | "date-1" |
| :doc4 | :author4 | "date-4" |
| :doc5 | :author5 | "date-5" |
----------------------------------
Building a map of the results
Based on your comments, it sounds like you need to construct some other kind of associative structure from the ResultSet. Here's one way that you could construct a Map<RDFNode,Map<String,RDFNode>> that maps each document IRI to another map that maps each of the variable names to the associated value.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import com.hp.hpl.jena.query.QueryExecutionFactory;
import com.hp.hpl.jena.query.QuerySolution;
import com.hp.hpl.jena.query.ResultSet;
import com.hp.hpl.jena.rdf.model.Model;
import com.hp.hpl.jena.rdf.model.ModelFactory;
import com.hp.hpl.jena.rdf.model.RDFNode;
public class HashedResultsExample {
final static String DATA =
"@prefix : <http://stackoverflow.com/q/20436820/1281433/> .\n" +
"\n" +
":doc1 :title 'Title1' ; :author :author1 ; :date 'date-1' .\n" +
":doc2 :title 'Title2' ; :author :author2 ; :date 'date-2' .\n" +
":doc3 :title 'Title3' ; :author :author3 ; :date 'date-3' .\n" +
":doc4 :title 'Title4' ; :author :author4 ; :date 'date-4' .\n" +
":doc5 :title 'Title5' ; :author :author5 ; :date 'date-5' .\n" ;
final static String QUERY =
"prefix : <http://stackoverflow.com/q/20436820/1281433/>\n" +
"select ?document ?author ?date where {\n" +
" values ?title { \"Title1\" \"Title4\" \"Title5\" }\n" +
" ?document :title ?title ; :author ?author ; :date ?date .\n" +
"}" ;
public static void main(String[] args) throws IOException {
final Model model = ModelFactory.createDefaultModel();
try ( final InputStream in = new ByteArrayInputStream( DATA.getBytes() )) {
model.read( in, null, "TTL" );
}
final ResultSet rs = QueryExecutionFactory.create( QUERY, model ).execSelect();
final Map<RDFNode,Map<String,RDFNode>> map = new HashMap<>();
while ( rs.hasNext() ) {
final QuerySolution qs = rs.next();
final Map<String,RDFNode> rowMap = new HashMap<>();
for ( final Iterator<String> varNames = qs.varNames(); varNames.hasNext(); ) {
final String varName = varNames.next();
rowMap.put( varName, qs.get( varName ));
}
map.put( qs.get( "document" ), rowMap );
}
System.out.println( map );
}
}
The output (since the map is printed at the end) with some newlines for readability is:
{http://stackoverflow.com/q/20436820/1281433/doc4=
{author=http://stackoverflow.com/q/20436820/1281433/author4,
document=http://stackoverflow.com/q/20436820/1281433/doc4,
date=date-4},
http://stackoverflow.com/q/20436820/1281433/doc1=
{author=http://stackoverflow.com/q/20436820/1281433/author1,
document=http://stackoverflow.com/q/20436820/1281433/doc1,
date=date-1},
http://stackoverflow.com/q/20436820/1281433/doc5=
{author=http://stackoverflow.com/q/20436820/1281433/author5,
document=http://stackoverflow.com/q/20436820/1281433/doc5,
date=date-5}}

ANTLR: Heterogeneous AST and imaginary tokens

it's my first question here :)
I'd like to build a heterogeneous AST with ANTLR for a simple grammar. There are different interfaces to represent the AST nodes, e.g. IInfixExp, IVariableDecl. ANTLR comes with CommonTree to hold all the information of the source code (line number, character position, etc.), and I want to use it as a base for the implementations of the AST interfaces (IInfixExp, ...).
In order to get an AST as output with CommonTree as node types, I set:
options {
language = Java;
k = 1;
output = AST;
ASTLabelType = CommonTree;
}
The IInfixExp interface is:
package toylanguage;
public interface IInfixExp extends IExpression {
public enum Operator {
PLUS, MINUS, TIMES, DIVIDE;
}
public Operator getOperator();
public IExpression getLeftHandSide();
public IExpression getRightHandSide();
}
and the implementation InfixExp is:
package toylanguage;
import org.antlr.runtime.Token;
import org.antlr.runtime.tree.CommonTree;
// IInitializable has only void initialize()
public class InfixExp extends CommonTree implements IInfixExp, IInitializable {
private Operator operator;
private IExpression leftHandSide;
private IExpression rightHandSide;
InfixExp(Token token) {
super(token);
}
@Override
public Operator getOperator() {
return operator;
}
@Override
public IExpression getLeftHandSide() {
return leftHandSide;
}
@Override
public IExpression getRightHandSide() {
return rightHandSide;
}
// from IInitializable; gets called from ToyTreeAdaptor.rulePostProcessing
@Override
public void initialize() {
// term ((PLUS|MINUS) term)+
// atom ((TIMES|DIVIDE) atom)+
// exact 2 children
assert getChildCount() == 2;
// left and right child are IExpressions
assert getChild(0) instanceof IExpression
&& getChild(1) instanceof IExpression;
// operator
switch (token.getType()) {
case ToyLanguageParser.PLUS:
operator = Operator.PLUS;
break;
case ToyLanguageParser.MINUS:
operator = Operator.MINUS;
break;
case ToyLanguageParser.TIMES:
operator = Operator.TIMES;
break;
case ToyLanguageParser.DIVIDE:
operator = Operator.DIVIDE;
break;
default:
assert false;
}
// left and right operands
leftHandSide = (IExpression) getChild(0);
rightHandSide = (IExpression) getChild(1);
}
}
The corresponding rules are:
exp // e.g. a+b
: term ((PLUS<InfixExp>^|MINUS<InfixExp>^) term)*
;
term // e.g. a*b
: atom ((TIMES<InfixExp>^|DIVIDE<InfixExp>^) atom)*
;
This works fine, because PLUS, MINUS, etc. are "real" tokens.
But now comes the imaginary token:
tokens {
PROGRAM;
}
The corresponding rule is:
program // e.g. var a, b; a + b
: varDecl* exp
-> ^(PROGRAM<Program> varDecl* exp)
;
With this, ANTLR doesn't create a tree with PROGRAM as root node.
In the parser, the following code creates the Program instance:
root_1 = (CommonTree)adaptor.becomeRoot(new Program(PROGRAM), root_1);
Unlike with InfixExp, it is not the Program(Token) constructor but Program(int) that is invoked.
Program is:
package toylanguage;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.antlr.runtime.Token;
import org.antlr.runtime.tree.CommonTree;
class Program extends CommonTree implements IProgram, IInitializable {
private final LinkedList<IVariableDecl> variableDeclarations = new LinkedList<IVariableDecl>();
private IExpression expression = null;
Program(Token token) {
super(token);
}
public Program(int tokenType) {
// What to do?
super();
}
@Override
public List<IVariableDecl> getVariableDeclarations() {
// don't allow to change the list
return Collections.unmodifiableList(variableDeclarations);
}
@Override
public IExpression getExpression() {
return expression;
}
@Override
public void initialize() {
// program: varDecl* exp;
// at least one child
assert getChildCount() > 0;
// the last one is a IExpression
assert getChild(getChildCount() - 1) instanceof IExpression;
// iterate over varDecl*
int i = 0;
while (getChild(i) instanceof IVariableDecl) {
variableDeclarations.add((IVariableDecl) getChild(i));
i++;
}
// exp
expression = (IExpression) getChild(i);
}
}
You can see the constructor:
public Program(int tokenType) {
// What to do?
super();
}
As a result, with super() a CommonTree is built without a token, so CommonTreeAdaptor.rulePostProcessing sees a flat list, not a tree with a Token as root.
My TreeAdaptor looks like:
package toylanguage;
import org.antlr.runtime.tree.CommonTreeAdaptor;
public class ToyTreeAdaptor extends CommonTreeAdaptor {
public Object rulePostProcessing(Object root) {
Object result = super.rulePostProcessing(root);
// check if needs initialising
if (result instanceof IInitializable) {
IInitializable initializable = (IInitializable) result;
initializable.initialize();
}
return result;
};
}
And to test it all, I use:
package toylanguage;
import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.TokenStream;
import org.antlr.runtime.tree.CommonTree;
import toylanguage.ToyLanguageParser.program_return;
public class Processor {
public static void main(String[] args) {
String input = "var a, b; a + b + 123"; // sample input
ANTLRStringStream stream = new ANTLRStringStream(input);
ToyLanguageLexer lexer = new ToyLanguageLexer(stream);
TokenStream tokens = new CommonTokenStream(lexer);
ToyLanguageParser parser = new ToyLanguageParser(tokens);
ToyTreeAdaptor treeAdaptor = new ToyTreeAdaptor();
parser.setTreeAdaptor(treeAdaptor);
try {
// test with: var a, b; a + b
program_return program = parser.program();
CommonTree root = program.tree;
// prints 'a b (+ a b)'
System.out.println(root.toStringTree());
// get (+ a b), the third child of root
CommonTree third = (CommonTree) root.getChild(2);
// prints '(+ a b)'
System.out.println(third.toStringTree());
// prints 'true'
System.out.println(third instanceof IInfixExp);
// prints 'false'
System.out.println(root instanceof IProgram);
} catch (RecognitionException e) {
e.printStackTrace();
}
}
}
For completeness, here is the full grammar:
grammar ToyLanguage;
options {
language = Java;
k = 1;
output = AST;
ASTLabelType = CommonTree;
}
tokens {
PROGRAM;
}
@header {
package toylanguage;
}
@lexer::header {
package toylanguage;
}
program // e.g. var a, b; a + b
: varDecl* exp
-> ^(PROGRAM<Program> varDecl* exp)
;
varDecl // e.g. var a, b;
: 'var'! ID<VariableDecl> (','! ID<VariableDecl>)* ';'!
;
exp // e.g. a+b
: term ((PLUS<InfixExp>^|MINUS<InfixExp>^) term)*
;
term // e.g. a*b
: atom ((TIMES<InfixExp>^|DIVIDE<InfixExp>^) atom)*
;
atom
: INT<IntegerLiteralExp> // e.g. 123
| ID<VariableExp> // e.g. a
| '(' exp ')' -> exp // e.g. (a+b)
;
INT : ('0'..'9')+ ;
ID : ('a'..'z')+ ;
PLUS : '+' ;
MINUS : '-' ;
TIMES : '*' ;
DIVIDE : '/' ;
WS : ('\t' | '\n' | '\r' | ' ')+ { $channel = HIDDEN; } ;
OK, the final question is how to get from
program // e.g. var a, b; a + b
: varDecl* exp
-> ^(PROGRAM<Program> varDecl* exp)
;
a tree with PROGRAM as root
^(PROGRAM varDecl* exp)
and not a flat list with
(varDecl* exp) ?
(Sorry for the numerous code fragments.)
Ciao Vertex
Try creating the following constructor:
public Program(int tokenType) {
super(new CommonToken(tokenType, "PROGRAM"));
}
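Note that org.antlr.runtime.CommonToken also has to be imported in Program.java. With a real token attached to the node, CommonTreeAdaptor.rulePostProcessing should no longer see a token-less root, so the program rule ought to produce ^(PROGRAM varDecl* exp) instead of the flat list described above.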