Scala: executing a complete .sql file

I have a .sql script file and need to execute the complete .sql file at once. I have found many related answers where the programmer reads the file and executes the queries one by one.
I need to run the .sql file all at once, i.e. pick up the file and execute it from within Scala.
Currently I am doing it with the following code, copied from https://gist.github.com/joe776/831762. My lead asked me to do it in a simpler way: instead of executing the script line by line, it should get executed as a .sql file.
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class ScriptRunner {
private static final String DEFAULT_DELIMITER = ";";
private static final String DELIMITER_LINE_REGEX = "(?i)DELIMITER.+";
private static final String DELIMITER_LINE_SPLIT_REGEX = "(?i)DELIMITER";
private final Connection connection;
private final boolean stopOnError;
private final boolean autoCommit;
private PrintWriter logWriter = new PrintWriter(System.out);
private PrintWriter errorLogWriter = new PrintWriter(System.err);
private String delimiter = DEFAULT_DELIMITER;
private boolean fullLineDelimiter = false;
/**
* Default constructor.
*
* @param connection
* @param autoCommit
* @param stopOnError
*/
public ScriptRunner(Connection connection, boolean autoCommit, boolean stopOnError) {
this.connection = connection;
this.autoCommit = autoCommit;
this.stopOnError = stopOnError;
}
/**
* @param delimiter
* @param fullLineDelimiter
*/
public void setDelimiter(String delimiter, boolean fullLineDelimiter) {
this.delimiter = delimiter;
this.fullLineDelimiter = fullLineDelimiter;
}
/**
* Setter for logWriter property.
*
* @param logWriter
* - the new value of the logWriter property
*/
public void setLogWriter(PrintWriter logWriter) {
this.logWriter = logWriter;
}
/**
* Setter for errorLogWriter property.
*
* @param errorLogWriter
* - the new value of the errorLogWriter property
*/
public void setErrorLogWriter(PrintWriter errorLogWriter) {
this.errorLogWriter = errorLogWriter;
}
/**
* Runs an SQL script (read in using the Reader parameter).
*
* @param reader
* - the source of the script
* @throws SQLException
* if any SQL errors occur
* @throws IOException
* if there is an error reading from the Reader
*/
public void runScript(Reader reader) throws IOException, SQLException {
try {
boolean originalAutoCommit = connection.getAutoCommit();
try {
if (originalAutoCommit != autoCommit) {
connection.setAutoCommit(autoCommit);
}
runScript(connection, reader);
} finally {
connection.setAutoCommit(originalAutoCommit);
}
} catch (IOException e) {
throw e;
} catch (SQLException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error running script. Cause: " + e, e);
}
}
/**
* Runs an SQL script (read in using the Reader parameter) using the connection passed in.
*
* @param conn
* - the connection to use for the script
* @param reader
* - the source of the script
* @throws SQLException
* if any SQL errors occur
* @throws IOException
* if there is an error reading from the Reader
*/
private void runScript(Connection conn, Reader reader) throws IOException, SQLException {
StringBuffer command = null;
try {
LineNumberReader lineReader = new LineNumberReader(reader);
String line = null;
while ((line = lineReader.readLine()) != null) {
if (command == null) {
command = new StringBuffer();
}
String trimmedLine = line.trim();
if (trimmedLine.startsWith("--")) {
println(trimmedLine);
} else if (trimmedLine.length() < 1 || trimmedLine.startsWith("//")) {
// Do nothing
} else if (!fullLineDelimiter && trimmedLine.endsWith(getDelimiter())
|| fullLineDelimiter && trimmedLine.equals(getDelimiter())) {
Pattern pattern = Pattern.compile(DELIMITER_LINE_REGEX);
Matcher matcher = pattern.matcher(trimmedLine);
if (matcher.matches()) {
setDelimiter(trimmedLine.split(DELIMITER_LINE_SPLIT_REGEX)[1].trim(),
fullLineDelimiter);
line = lineReader.readLine();
if (line == null) {
break;
}
trimmedLine = line.trim();
}
command.append(line.substring(0, line.lastIndexOf(getDelimiter())));
command.append(" ");
Statement statement = conn.createStatement();
println(command);
boolean hasResults = false;
if (stopOnError) {
hasResults = statement.execute(command.toString());
} else {
try {
statement.execute(command.toString());
} catch (SQLException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
}
}
if (autoCommit && !conn.getAutoCommit()) {
conn.commit();
}
ResultSet rs = statement.getResultSet();
if (hasResults && rs != null) {
ResultSetMetaData md = rs.getMetaData();
int cols = md.getColumnCount();
for (int i = 0; i < cols; i++) {
String name = md.getColumnLabel(i);
print(name + "\t");
}
println("");
while (rs.next()) {
for (int i = 1; i <= cols; i++) {
String value = rs.getString(i);
print(value + "\t");
}
println("");
}
}
command = null;
try {
if (rs != null) {
rs.close();
}
} catch (Exception e) {
e.printStackTrace();
}
try {
if (statement != null) {
statement.close();
}
} catch (Exception e) {
e.printStackTrace();
// Ignore to workaround a bug in Jakarta DBCP
}
} else {
Pattern pattern = Pattern.compile(DELIMITER_LINE_REGEX);
Matcher matcher = pattern.matcher(trimmedLine);
if (matcher.matches()) {
setDelimiter(trimmedLine.split(DELIMITER_LINE_SPLIT_REGEX)[1].trim(),
fullLineDelimiter);
line = lineReader.readLine();
if (line == null) {
break;
}
trimmedLine = line.trim();
}
command.append(line);
command.append(" ");
}
}
if (!autoCommit) {
conn.commit();
}
} catch (SQLException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
throw e;
} catch (IOException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
throw e;
} finally {
conn.rollback();
flush();
}
}
private String getDelimiter() {
return delimiter;
}
private void print(Object o) {
if (logWriter != null) {
logWriter.print(o);
}
}
private void println(Object o) {
if (logWriter != null) {
logWriter.println(o);
}
}
private void printlnError(Object o) {
if (errorLogWriter != null) {
errorLogWriter.println(o);
}
}
private void flush() {
if (logWriter != null) {
logWriter.flush();
}
if (errorLogWriter != null) {
errorLogWriter.flush();
}
}
}
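For reference, the runner above is invoked roughly like this; the driver, URL, credentials and file name below are placeholders:
import java.io.BufferedReader;
import java.io.FileReader;
import java.sql.Connection;
import java.sql.DriverManager;

public class RunScript {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; load your own driver and URL.
        Connection con = DriverManager.getConnection("jdbc:mysql://localhost:3306/mydb", "user", "pass");
        try {
            // autoCommit = false, stopOnError = true
            ScriptRunner runner = new ScriptRunner(con, false, true);
            runner.runScript(new BufferedReader(new FileReader("myFile.sql")));
        } finally {
            con.close();
        }
    }
}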

Why did you tag "scala"?
To execute a whole sql file at once:
sql < myFile.sql
Edit: as Cyrille Corpet pointed out, you can run this command directly from Scala. Note that sys.process does not go through a shell, so the < redirection must be expressed with #< instead of being embedded in the command string:
import sys.process._
import java.io.File

("sql" #< new File("myFile.sql")).!

Related

JVM: Condition vs LockSupport, which is faster?

I made an experimental synchronous-call test: each consumer task waits for a callback that sets its own value to id + 1. I compared the performance of Condition and LockSupport, and the result was unexpected: the two take the same time, but the difference on the flame graph is very big. Does it mean that the JVM has not optimized LockSupport?
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;

public class LockerTest {
static int max = 100000;
static boolean usePark = true;
static Map<Long, Long> msg = new ConcurrentHashMap<>();
static ExecutorService producer = Executors.newFixedThreadPool(4);
static ExecutorService consumer = Executors.newFixedThreadPool(16);
static AtomicLong record = new AtomicLong(0);
static CountDownLatch latch = new CountDownLatch(max);
static ReentrantLock lock = new ReentrantLock();
static Condition cond = lock.newCondition();
static Map<Long, Thread> parkMap = new ConcurrentHashMap<>();
static AtomicLong cN = new AtomicLong(0);
public static void main(String[] args) throws InterruptedException {
long start = System.currentTimeMillis();
for (int num = 0; num < max; num++) {
consumer.execute(() -> {
long id = record.incrementAndGet();
msg.put(id, -1L);
call(id);
if (usePark) {
Thread thread = Thread.currentThread();
parkMap.put(id, thread);
while (msg.get(id) == -1) {
cN.incrementAndGet();
LockSupport.park(thread);
}
} else {
lock.lock();
try {
while (msg.get(id) == -1) {
cN.incrementAndGet();
cond.await();
}
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
lock.unlock();
}
}
latch.countDown();
});
}
latch.await();
consumer.shutdown();
producer.shutdown();
System.out.printf("park %s suc %s cost %s cn %s"
, usePark
, msg.entrySet().stream().noneMatch(entry -> entry.getKey() + 1 != entry.getValue())
, System.currentTimeMillis() - start
, cN.get()
);
}
private static void call(long id) {
producer.execute(() -> {
try {
Thread.sleep((id * 13) % 100);
} catch (InterruptedException e) {
e.printStackTrace();
}
if (usePark) {
msg.put(id, id + 1);
LockSupport.unpark(parkMap.remove(id));
} else {
lock.lock();
try {
msg.put(id, id + 1);
cond.signalAll();
} finally {
lock.unlock();
}
}
});
}
}
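For context, a minimal single handoff with each primitive looks like the sketch below. ReentrantLock's Condition is implemented on top of LockSupport inside AbstractQueuedSynchronizer, so similar total times are not surprising even though the call stacks, and therefore the flame graphs, look different:
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;

public class HandoffSketch {
    static volatile boolean ready = false;
    static final ReentrantLock lock = new ReentrantLock();
    static final Condition cond = lock.newCondition();

    public static void main(String[] args) throws InterruptedException {
        // Variant 1: park/unpark. The waiter parks itself; the producer unparks it directly.
        Thread waiter = new Thread(() -> {
            while (!ready) {
                LockSupport.park(); // blocks until unpark; spurious wakeups handled by the loop
            }
        });
        waiter.start();
        ready = true;
        LockSupport.unpark(waiter);
        waiter.join();

        // Variant 2: Condition. Same blocking behaviour, but guarded by a lock.
        ready = false;
        Thread waiter2 = new Thread(() -> {
            lock.lock();
            try {
                while (!ready) {
                    cond.awaitUninterruptibly(); // parks internally via LockSupport
                }
            } finally {
                lock.unlock();
            }
        });
        waiter2.start();
        lock.lock();
        try {
            ready = true;
            cond.signal();
        } finally {
            lock.unlock();
        }
        waiter2.join();
        System.out.println("both variants completed");
    }
}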

Where to put my Java code in an OpenDaylight project (load-balancing routing)

I am new to OpenDaylight. I have installed Mininet and connected it to the controller successfully. I want to implement dynamic load balancing on the controller. I have the Java code with me; the problem is that I do not know where to put it.
/**
* @file LbRoutingImplementation.java
*
*
* @brief Implementation of a routing engine using
* lb_routing. Implementation of lb_routing
* come from Jung2 library
*
*/
package org.opendaylight.controller.routing.lb_routing_implementation.internal;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.ConstructionException;
import org.opendaylight.controller.sal.core.Edge;
import org.opendaylight.controller.sal.core.Node;
import org.opendaylight.controller.sal.core.NodeConnector;
import org.opendaylight.controller.sal.core.Path;
import org.opendaylight.controller.sal.core.Property;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.reader.IReadService;
import org.opendaylight.controller.sal.routing.IListenRoutingUpdates;
import org.opendaylight.controller.sal.routing.IRouting;
import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.controller.topologymanager.ITopologyManager;
import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
import org.opendaylight.controller.statisticsmanager.internal.StatisticsManager;
import edu.uci.ics.jung.algorithms.shortestpath.DijkstraShortestPath;
import edu.uci.ics.jung.graph.Graph;
import edu.uci.ics.jung.graph.SparseMultigraph;
import edu.uci.ics.jung.graph.util.EdgeType;
import java.lang.Exception;
import java.lang.IllegalArgumentException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.Timer;
import java.util.TimerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.collections15.Transformer;
public class LbRoutingImplementation implements IRouting, ITopologyManagerAware {
private static Logger log = LoggerFactory.getLogger(LbRoutingImplementation.class);
private ConcurrentMap<Short, Graph<Node, Edge>> topologyBWAware;
private ConcurrentMap<Short, DijkstraShortestPath<Node, Edge>> sptBWAware;
DijkstraShortestPath<Node, Edge> mtp; // Max Throughput Path
private Set<IListenRoutingUpdates> routingAware;
private ISwitchManager switchManager;
private ITopologyManager topologyManager;
private IReadService readService;
private static final long DEFAULT_LINK_SPEED = Bandwidth.BW1Gbps;
private StatisticsManager statMgr;
private Timer lbRoutingTimer;
private TimerTask lbRoutingTimerTask;
public void setListenRoutingUpdates(IListenRoutingUpdates i) {
if (this.routingAware == null) {
this.routingAware = new HashSet<IListenRoutingUpdates>();
}
if (this.routingAware != null) {
log.debug("Adding routingAware listener: {}", i);
this.routingAware.add(i);
}
}
public void unsetListenRoutingUpdates(IListenRoutingUpdates i) {
if (this.routingAware == null) {
return;
}
log.debug("Removing routingAware listener");
this.routingAware.remove(i);
if (this.routingAware.isEmpty()) {
// We don't have any listener lets dereference
this.routingAware = null;
}
}
@Override
public synchronized void initMaxThroughput(
final Map<Edge, Number> EdgeWeightMap) {
if (mtp != null) {
log.error("Max Throughput Dijkstra is already enabled!");
return;
}
Transformer<Edge, ? extends Number> mtTransformer = null;
if (EdgeWeightMap == null) {
mtTransformer = new Transformer<Edge, Double>() {
public Double transform(Edge e) {
if (switchManager == null) {
log.error("switchManager is null");
return (double) -1;
}
NodeConnector srcNC = e.getTailNodeConnector();
NodeConnector dstNC = e.getHeadNodeConnector();
if ((srcNC == null) || (dstNC == null)) {
log.error("srcNC:{} or dstNC:{} is null", srcNC, dstNC);
return (double) -1;
}
Bandwidth bwSrc = (Bandwidth) switchManager
.getNodeConnectorProp(srcNC, Bandwidth.BandwidthPropName);
Bandwidth bwDst = (Bandwidth) switchManager
.getNodeConnectorProp(dstNC, Bandwidth.BandwidthPropName);
// If the src and dst vertex don't have incoming or
// outgoing links we can get ride of them
if (topo.containsVertex(src.getNode())
&& topo.inDegree(src.getNode()) == 0
&& topo.outDegree(src.getNode()) == 0) {
log.debug("Removing vertex {}", src);
topo.removeVertex(src.getNode());
}
if (topo.containsVertex(dst.getNode())
&& topo.inDegree(dst.getNode()) == 0
&& topo.outDegree(dst.getNode()) == 0) {
log.debug("Removing vertex {}", dst);
topo.removeVertex(dst.getNode());
}
}
spt.reset();
if (bw.equals(baseBW)) {
clearMaxThroughput();
}
} else {
log.error("Cannot find topology for BW {} this is unexpected!", bw);
}
return edgePresentInGraph;
}
private boolean edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
String srcType = null;
String dstType = null;
if (e == null || type == null) {
log.error("Edge or Update type are null!");
return false;
} else {
srcType = e.getTailNodeConnector().getType();
dstType = e.getHeadNodeConnector().getType();
if (srcType.equals(NodeConnector.NodeConnectorIDType.PRODUCTION)) {
log.debug("Skip updates f
or {}", e);
return false;
}
if (dstType.equals(NodeConnector.NodeConnectorIDType.PRODUCTION)) {
log.debug("Skip updates for {}", e);
return false;
}
}
Bandwidth bw = new Bandwidth(0);
boolean newEdge = false;
if (props != null)
props.remove(bw);
if (log.isDebugEnabled()) {
log.debug("edgeUpdate: {} bw: {}", e, bw.getValue());
}
Short baseBW = Short.valueOf((short) 0);
boolean add = (type == UpdateType.ADDED) ? true : false;
// Update base topo
newEdge = !updateTopo(e, baseBW, add);
if (newEdge == true) {
if (bw.getValue() != baseBW) {
// Update BW topo
updateTopo(e, (short) bw.getValue(), add);
}
}
return newEdge;
}
@Override
public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
boolean callListeners = false;
for (int i = 0; i < topoedgeupdateList.size(); i++) {
Edge e = topoedgeupdateList.get(i).getEdge();
Set<Property> p = topoedgeupdateList.get(i).getProperty();
UpdateType type = topoedgeupdateList.get(i).getUpdateType();
if ((edgeUpdate(e, type, p)) && (!callListeners)) {
callListeners = true;
}
}
if ((callListeners) && (this.routingAware != null)) {
/*Map<Edge, Number> edgeWeightMap = new Map<Edge, Number>();
Bandwidth bw = new Bandwidth(0);
Graph<Node, Edge> topo = this.topologyBWAware.get(bw);
Collection<Edge> edges = topo.getEdges();
Iterator<Edge> iterEdge;
Number i;
for (iterEdge = edges.iterator(), i = 0; iterEdge.hasNext();) {
Edge e = iterEdge.next();
edgeWeightMap.put(e, i);
i = i.intValue() + 1;
}*/
clearMaxThroughput();
mtp = null;
initMaxThroughput(null);
for (IListenRoutingUpdates ra : this.routingAware) {
try {
ra.recalculateDone();
} catch (Exception ex) {
log.error("
Exception on routingAware listener call", ex);
}
}
}
}
private void updateEdgeUtilization() {
log.debug("UpdateEdgeUtilization called");
clearMaxThroughput();
mtp = null;
initMaxThroughput(null);
}
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
*
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public void init() {
log.debug("Routing init() is called");
this.topologyBWAware = (ConcurrentMap<Short, Graph<Node, Edge>>) new ConcurrentHashMap();
this.sptBWAware = (ConcurrentMap<Short, DijkstraShortestPath<Node, Edge>>) new ConcurrentHashMap();
// Now create the default topology, which doesn't consider the
// BW, also create the corresponding Dijkstra calculation
Graph<Node, Edge> g = new SparseMultigraph();
Short sZero = Short.valueOf((short) 0);
this.topologyBWAware.put(sZero, g);
this.sptBWAware.put(sZero, new DijkstraShortestPath(g));
// Topologies for other BW will be added on a needed base
this.statMgr = new StatisticsManager();
this.lbRoutingTimer = new Timer();
this.lbRoutingTimerTask =
new TimerTask() {
@Override
public void run() {
updateEdgeUtilization();
}
};
}
/**
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
*
*/
void destroy() {
log.debug("Routing destroy() is called");
}
/**
* Function called by dependency manager after "init()" is called and after
* the services provided by the class are registered in the service registry
*
*/
void start() {
log.debug("Routing start() is called");
// build the routing database from the topology if it exists.
Map<Edge, Set<Property>> edges = topologyManager.getEdges();
if (edges.isEmpty()) {
return;
}
List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
log.debug("Creating routing datab
ase from the topology");
for (Iterator<Map.Entry<Edge, Set<Property>>> i = edges.entrySet()
.iterator(); i.hasNext();) {
Map.Entry<Edge, Set<Property>> entry = i.next();
Edge e = entry.getKey();
Set<Property> props = entry.getValue();
TopoEdgeUpdate topoedgeupdate = new TopoEdgeUpdate(e, props,
UpdateType.ADDED);
topoedgeupdateList.add(topoedgeupdate);
}
edgeUpdate(topoedgeupdateList);
// StatsTimer is set to 10s
lbRoutingTimer.scheduleAtFixedRate(lbRoutingTimerTask, 0, 10000);
}
/**
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
*
*/
public void stop() {
log.debug("Routing stop() is called");
}
@Override
public void edgeOverUtilized(Edge edge) {
// TODO Auto-generated method stub
}
@Override
public void edgeUtilBackToNormal(Edge edge) {
// TODO Auto-generated method stub
}
public void setSwitchManager(ISwitchManager switchManager) {
this.switchManager = switchManager;
}
public void unsetSwitchManager(ISwitchManager switchManager) {
if (this.switchManager == switchManager) {
this.switchManager = null;
}
}
public void setReadService(IReadService readService) {
this.readService = readService;
}
public void unsetReadService(IReadService readService) {
if (this.readService == readService) {
this.readService = null;
}
}
public void setTopologyManager(ITopologyManager tm) {
this.topologyManager = tm;
}
public void unsetTopologyManager(ITopologyManager tm) {
if (this.topologyManager == tm) {
this.topologyManager = null;
}
}
}

SQL query into JTable

I've found one fully working query, which gets columns and their data from an Oracle DB and prints the output to the console.
I've spent three hours trying to display this data in a Swing JTable.
When I try to bind the data to a JTable:
jTable1.setModel(new javax.swing.table.DefaultTableModel(
data, header
));
it keeps telling me that the constructor is invalid. That's true, because I need a [] and a [][] array for that. Any ideas how this can be implemented?
Here is the original query:
package com.javacoderanch.example.sql;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
public class MetadataColumnExample {
private static final String DRIVER = "oracle.jdbc.OracleDriver";
private static final String URL = "jdbc:oracle:thin:@//XXX";
private static final String USERNAME = "XXX";
private static final String PASSWORD = "XXX";
public static void main(String[] args) throws Exception {
Connection connection = null;
try {
//
// As the usual ritual, load the driver class and get connection
// from database.
//
Class.forName(DRIVER);
connection = DriverManager.getConnection(URL, USERNAME, PASSWORD);
//
// In the statement below we'll select all records from users table
// and then try to find all the columns it has.
//
Statement statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery("select *\n"
+ "from booking\n"
+ "where TRACKING_NUMBER = 1000001741");
//
// The ResultSetMetaData is where all metadata related information
// for a result set is stored.
//
ResultSetMetaData metadata = resultSet.getMetaData();
int columnCount = metadata.getColumnCount();
//
// To get the column names we do a loop for a number of column count
// returned above. And please remember a JDBC operation is 1-indexed
// so every index begin from 1 not 0 as in array.
//
ArrayList<String> columns = new ArrayList<String>();
for (int i = 1; i <= columnCount; i++) {
String columnName = metadata.getColumnName(i);
columns.add(columnName);
}
//
// Later we use the collected column names to get the value of the
// column it self.
//
while (resultSet.next()) {
for (String columnName : columns) {
String value = resultSet.getString(columnName);
System.out.println(columnName + " = " + value);
}
}
} catch (SQLException e) {
e.printStackTrace();
} finally {
connection.close();
}
}
}
OK, I found a somewhat better way of adding an SQL query to a JTable without even using an ArrayList. Maybe it will be helpful for someone; it is completely working:
import java.awt.*;
import javax.swing.*;
import java.sql.*;
import java.awt.image.BufferedImage;
public class report extends JFrame {
PreparedStatement ps;
Connection con;
ResultSet rs;
Statement st;
JLabel l1;
String bn;
int bid;
Date d1, d2;
int rows = 0;
Object data1[][];
JScrollPane scroller;
JTable table;
public report() {
Container cp = getContentPane();
cp.setLayout(new BorderLayout());
setSize(600, 600);
setLocation(50, 50);
setLayout(new BorderLayout());
setTitle("Library Report");
try {
Class.forName("oracle.jdbc.OracleDriver");
con = DriverManager.getConnection("jdbc:oracle:thin:@XXXXX", "XXXXX", "XXXXX");
} catch (Exception e) {
}
try {
/*
* JDBC 2.0 provides a way to retrieve a rowcount from a ResultSet without having to scan through all the rows
* So we add TYPE_SCROLL_INSENSITIVE & CONCUR_READ_ONLY
*/
st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); //Creating Statement Object
} catch (SQLException sqlex) {
System.out.println("!!!###");
}
try {
rs = st.executeQuery("select TRACKING_NUMBER, INCO_TERM_CODE, MODE_OF_TRANSPORT\n"
+ "from salog.booking\n"
+ "where rownum < 5");
// Counting rows
rs.last();
int rows = rs.getRow();
rs.beforeFirst();
System.out.println("cc " + rows);
ResultSetMetaData metaData = rs.getMetaData();
int colummm = metaData.getColumnCount();
System.out.println("colms =" + colummm);
Object[] Colheads = {"BookId", "BookName", "rtyry"};
if (Colheads.length != colummm) {
// System.out.println("EPT!!");
JOptionPane.showMessageDialog(rootPane, "Incorrect Column Headers quantity listed in array! The program will now exit.", "System Error", JOptionPane.ERROR_MESSAGE);
System.exit(0);
}
data1 = new Object[rows][Colheads.length];
for (int i1 = 0; i1 < rows; i1++) {
rs.next();
for (int j1 = 0; j1 < Colheads.length; j1++) {
data1[i1][j1] = rs.getString(j1 + 1);
}
}
JTable table = new JTable(data1, Colheads);
JScrollPane jsp = new JScrollPane(table);
getContentPane().add(jsp);
} catch (Exception e) {
}
setVisible(true);
setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
}
public static void main(String args[]) {
JFrame frm = new report();
frm.setSize(600, 600);
frm.setLocation(50, 50);
BufferedImage image = null;
frm.setIconImage(image);
frm.setVisible(true);
frm.show();
}
}
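For reference, DefaultTableModel also has a Vector-based constructor, so a ResultSet can be turned into a table model without fixed-size arrays or row counting. A rough sketch along those lines; the connection details and the query are placeholders:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.Vector;
import javax.swing.JFrame;
import javax.swing.JScrollPane;
import javax.swing.JTable;
import javax.swing.table.DefaultTableModel;

public class ResultSetTable {
    // Builds a DefaultTableModel from any ResultSet; no arrays and no row counting needed.
    static DefaultTableModel buildModel(ResultSet rs) throws Exception {
        ResultSetMetaData md = rs.getMetaData();
        int cols = md.getColumnCount();
        Vector<String> header = new Vector<>();
        for (int i = 1; i <= cols; i++) {
            header.add(md.getColumnLabel(i)); // JDBC columns are 1-indexed
        }
        Vector<Vector<Object>> data = new Vector<>();
        while (rs.next()) {
            Vector<Object> row = new Vector<>();
            for (int i = 1; i <= cols; i++) {
                row.add(rs.getObject(i));
            }
            data.add(row);
        }
        return new DefaultTableModel(data, header);
    }

    public static void main(String[] args) throws Exception {
        // Placeholder URL, credentials and query; replace with your own.
        try (Connection con = DriverManager.getConnection("jdbc:oracle:thin:@//host:1521/service", "user", "pass");
             Statement st = con.createStatement();
             ResultSet rs = st.executeQuery("select * from booking where rownum < 5")) {
            JFrame frame = new JFrame("Report");
            frame.add(new JScrollPane(new JTable(buildModel(rs))));
            frame.pack();
            frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            frame.setVisible(true);
        }
    }
}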

With Lucene 4.3.1, How to get all terms which occur in sub-range of all docs

Suppose a lucene index with fields : date, content.
I want to get all term values and their frequencies for the docs whose date is yesterday. The date field is a keyword field; the content field is analyzed and indexed.
Please help me with sample code.
My solution source is as follows:
/**
*
*
* @param searcher
* @param fromDateTime
* - yyyymmddhhmmss
* @param toDateTime
* - yyyymmddhhmmss
* @return
*/
static public String top10(IndexSearcher searcher, String fromDateTime,
String toDateTime) {
String top10Query = "";
try {
Query query = new TermRangeQuery("tweetDate", new BytesRef(
fromDateTime), new BytesRef(toDateTime), true, false);
final BitSet bits = new BitSet(searcher.getIndexReader().maxDoc());
searcher.search(query, new Collector() {
private int docBase;
@Override
public void setScorer(Scorer scorer) throws IOException {
}
@Override
public void setNextReader(AtomicReaderContext context)
throws IOException {
this.docBase = context.docBase;
}
@Override
public void collect(int doc) throws IOException {
bits.set(doc + docBase);
}
@Override
public boolean acceptsDocsOutOfOrder() {
return false;
}
});
//
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43,
EnglishStopWords.getEnglishStopWords());
//
HashMap<String, Long> wordFrequency = new HashMap<>();
for (int wx = 0; wx < bits.length(); ++wx) {
if (bits.get(wx)) {
Document wd = searcher.doc(wx);
//
TokenStream tokenStream = analyzer.tokenStream("temp",
new StringReader(wd.get("content")));
// OffsetAttribute offsetAttribute = tokenStream
// .addAttribute(OffsetAttribute.class);
CharTermAttribute charTermAttribute = tokenStream
.addAttribute(CharTermAttribute.class);
tokenStream.reset();
while (tokenStream.incrementToken()) {
// int startOffset = offsetAttribute.startOffset();
// int endOffset = offsetAttribute.endOffset();
String term = charTermAttribute.toString();
if (term.length() < 2)
continue;
Long wl;
if ((wl = wordFrequency.get(term)) == null)
wordFrequency.put(term, 1L);
else {
wl += 1;
wordFrequency.put(term, wl);
}
}
tokenStream.end();
tokenStream.close();
}
}
analyzer.close();
// sort
List<String> occurterm = new ArrayList<String>();
for (String ws : wordFrequency.keySet()) {
occurterm.add(String.format("%06d\t%s", wordFrequency.get(ws),
ws));
}
Collections.sort(occurterm, Collections.reverseOrder());
// make query string by top 10 words
int topCount = 10;
for (String ws : occurterm) {
if (topCount-- == 0)
break;
String[] tks = ws.split("\\t");
top10Query += tks[1] + " ";
}
top10Query = top10Query.trim();
} catch (IOException e) {
e.printStackTrace();
} finally {
}
// return top10 word string
return top10Query;
}
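The sort step above packs the count into a zero-padded string so that lexicographic order matches numeric order. A plain-Java alternative, sketched below against a hypothetical wordFrequency map, is to sort the map entries with a comparator instead:
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TopTerms {
    // Returns the n keys with the highest counts, most frequent first.
    static List<String> top(Map<String, Long> wordFrequency, int n) {
        List<Map.Entry<String, Long>> entries =
                new ArrayList<Map.Entry<String, Long>>(wordFrequency.entrySet());
        Collections.sort(entries, new Comparator<Map.Entry<String, Long>>() {
            public int compare(Map.Entry<String, Long> a, Map.Entry<String, Long> b) {
                return b.getValue().compareTo(a.getValue()); // highest frequency first
            }
        });
        List<String> top = new ArrayList<String>();
        for (Map.Entry<String, Long> e : entries.subList(0, Math.min(n, entries.size()))) {
            top.add(e.getKey());
        }
        return top;
    }

    public static void main(String[] args) {
        Map<String, Long> freq = new HashMap<String, Long>();
        freq.put("lucene", 5L);
        freq.put("index", 3L);
        freq.put("query", 7L);
        System.out.println(top(freq, 2)); // prints [query, lucene]
    }
}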

What's the point of HibernateTemplate's bulkUpdate?

Is HibernateTemplate's bulkUpdate actually doing a bulk update? I looked at the code, and it doesn't seem to be. Or am I missing something?
public int bulkUpdate(final String queryString, final Object... values) throws DataAccessException {
return executeWithNativeSession(new HibernateCallback<Integer>() {
public Integer doInHibernate(Session session) throws HibernateException {
Query queryObject = session.createQuery(queryString);
prepareQuery(queryObject);
if (values != null) {
for (int i = 0; i < values.length; i++) {
queryObject.setParameter(i, values[i]);
}
}
return queryObject.executeUpdate();
}
});
}
whereas JdbcTemplate's batchUpdate looks like it really does a batch update:
public int[] batchUpdate(final String[] sql) throws DataAccessException {
Assert.notEmpty(sql, "SQL array must not be empty");
if (logger.isDebugEnabled()) {
logger.debug("Executing SQL batch update of " + sql.length + " statements");
}
class BatchUpdateStatementCallback implements StatementCallback<int[]>, SqlProvider {
private String currSql;
public int[] doInStatement(Statement stmt) throws SQLException, DataAccessException {
int[] rowsAffected = new int[sql.length];
if (JdbcUtils.supportsBatchUpdates(stmt.getConnection())) {
for (String sqlStmt : sql) {
this.currSql = sqlStmt;
stmt.addBatch(sqlStmt);
}
rowsAffected = stmt.executeBatch();
}
else {
for (int i = 0; i < sql.length; i++) {
this.currSql = sql[i];
if (!stmt.execute(sql[i])) {
rowsAffected[i] = stmt.getUpdateCount();
}
else {
throw new InvalidDataAccessApiUsageException("Invalid batch SQL statement: " + sql[i]);
}
}
}
return rowsAffected;
}
public String getSql() {
return this.currSql;
}
}
return execute(new BatchUpdateStatementCallback());
}
Yes, it is doing a bulk update. As you can see, the DELETE and INSERT queries executed in the bulkUpdate method can affect multiple rows; that is why they are called bulk operations.
The point is to have a handy method that executes an update query and returns the number of rows affected by the bulk operation. Additionally, it wraps exceptions in DataAccessException.
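To make the distinction concrete, here is a hedged usage sketch; the Person entity, the table names and the hibernate3 package are assumptions, not part of the original question. bulkUpdate sends one HQL statement that touches many rows, while batchUpdate sends several SQL statements as one JDBC batch:
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.orm.hibernate3.HibernateTemplate;

public class BulkVsBatchExample {
    private final HibernateTemplate hibernateTemplate;
    private final JdbcTemplate jdbcTemplate;

    public BulkVsBatchExample(HibernateTemplate hibernateTemplate, JdbcTemplate jdbcTemplate) {
        this.hibernateTemplate = hibernateTemplate;
        this.jdbcTemplate = jdbcTemplate;
    }

    public void deactivateStaleUsers(java.util.Date cutoff) {
        // One HQL statement, many rows affected: a "bulk" operation.
        // "Person" is a hypothetical mapped entity.
        int affected = hibernateTemplate.bulkUpdate(
                "update Person p set p.active = false where p.lastLogin < ?", cutoff);
        System.out.println("bulk update affected " + affected + " rows");

        // Several SQL statements submitted as one JDBC batch: a "batch" operation.
        // "person" and "person_session" are hypothetical tables.
        int[] counts = jdbcTemplate.batchUpdate(new String[] {
                "update person set active = 0 where last_login < DATE '2013-01-01'",
                "delete from person_session where expired = 1"
        });
        System.out.println("batch executed " + counts.length + " statements");
    }
}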