Using .NET Framework 4, I have the following input string:
Dim s As String = "9330e655-63d4-4aee-be79-505554256bd3"
I want to write a function that will return true or false as to whether or not any input string is a valid SqlGuid:
Method 1:
' Parse to validate ...
Dim ok As Boolean = True
Dim s_uid As SqlGuid
Try
    s_uid = SqlGuid.Parse(s)
Catch e As Exception
    ok = False
End Try
Return ok
However, the .NET Framework also provides the Guid.TryParse() method, i.e.:
Method 2:
' Parse to validate ...
Dim g As Guid
ok = Guid.TryParse(s, g)
My question: which is more efficient, and which do most people use to validate SQL GUIDs (Method 1 or Method 2)?
If you're just checking validity then you can use either method, and Guid.TryParse will allow you to validate without doing expensive exception handling.
Be aware, however, that the string representations of Guid and SqlGuid are not the same, so a string generated from a Guid should not be used to create a SqlGuid since it will result in a different GUID value.
Best practice would be to use the built-in TryParse, but if you want to look at the speed/performance itself, here is a quick benchmark.
Don't forget to build as Release and don't run it from Visual Studio.
using System;
using System.Data.SqlTypes;
using System.Text.RegularExpressions;
namespace ConsoleApplication1
{
class Program
{
static void Main(string[] args)
{
var regex = new Regex(@"\b[A-F0-9]{8}(?:-[A-F0-9]{4}){3}-[A-F0-9]{12}\b", RegexOptions.IgnoreCase);
string s = "9330e655-63d4-4aee-be79-505554256bd3";
SqlGuid resultSql;
Guid result;
var sw = System.Diagnostics.Stopwatch.StartNew();
bool valid;
valid = true;
for (int i = 0; i < 500000; ++i)
{
try
{
resultSql = SqlGuid.Parse(s);
valid &= true;
}
catch
{
valid = false;
}
}
sw.Stop();
Console.WriteLine("sql try-catch {0}ms all valid {1}", sw.ElapsedMilliseconds, valid);
sw = System.Diagnostics.Stopwatch.StartNew();
valid = true;
for (int i = 0; i < 500000; ++i)
{
try
{
result = Guid.Parse(s);
valid &= true;
}
catch
{
valid = false;
}
}
sw.Stop();
Console.WriteLine("guid try-catch {0}ms all valid {1}", sw.ElapsedMilliseconds, valid);
sw = System.Diagnostics.Stopwatch.StartNew();
valid = true;
for (int i = 0; i < 500000; ++i)
{
valid &= Guid.TryParse(s, out result);
}
sw.Stop();
Console.WriteLine("tryparse {0}ms all valid {1}", sw.ElapsedMilliseconds, valid);
sw = System.Diagnostics.Stopwatch.StartNew();
valid = true;
for (int i = 0; i < 500000; ++i)
{
valid &= regex.IsMatch(s);
}
sw.Stop();
Console.WriteLine("regex {0}ms all valid {1}", sw.ElapsedMilliseconds, valid);
Console.ReadKey();
}
}
}
From what I have read from multiple sources online, batch query execution groups multiple statements together and executes them at once, thereby eliminating multiple back-and-forth communications with the database.
Some sources that claim this are:
https://www.tutorialspoint.com/jdbc/jdbc-batch-processing.htm#:~:text=Batch%20Processing%20allows%20you%20to,communication%20overhead%2C%20thereby%20improving%20performance.
http://tutorials.jenkov.com/jdbc/batchupdate.html
https://www.baeldung.com/jdbc-batch-processing
All of these talk about a single network trip etc.; however, going through the source code of H2 or SQLite, it does look like they execute the statements one by one, albeit with autocommit disabled.
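For reference, the JDBC batch API these tutorials describe is used roughly like this. This is only a minimal sketch: the users table and the in-memory H2 URL are made up for illustration, and whether the batch becomes a single network round trip is entirely up to the driver.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.Statement;
public class BatchInsertSketch {
    public static void main(String[] args) throws Exception {
        // in-memory H2 database just for illustration
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test")) {
            try (Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE users(id INT PRIMARY KEY, name VARCHAR(64))");
            }
            conn.setAutoCommit(false);
            try (PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO users(id, name) VALUES (?, ?)")) {
                for (int i = 0; i < 1000; i++) {
                    ps.setInt(1, i);
                    ps.setString(2, "user-" + i);
                    ps.addBatch();                      // queue the statement client-side
                }
                int[] updateCounts = ps.executeBatch(); // hand the whole batch to the driver
                System.out.println("batched rows: " + updateCounts.length);
            }
            conn.commit();
        }
    }
}
From the application's point of view this is one executeBatch() call; whether the driver turns it into one round trip or an internal loop is exactly what the driver sources below show.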
E.g., SQLite:
final synchronized int[] executeBatch(long stmt, int count, Object[] vals, boolean autoCommit) throws SQLException {
if (count < 1) {
throw new SQLException("count (" + count + ") < 1");
}
final int params = bind_parameter_count(stmt);
int rc;
int[] changes = new int[count];
try {
for (int i = 0; i < count; i++) {
reset(stmt);
for (int j = 0; j < params; j++) {
rc = sqlbind(stmt, j, vals[(i * params) + j]);
if (rc != SQLITE_OK) {
throwex(rc);
}
}
rc = step(stmt);
if (rc != SQLITE_DONE) {
reset(stmt);
if (rc == SQLITE_ROW) {
throw new BatchUpdateException("batch entry " + i + ": query returns results", changes);
}
throwex(rc);
}
changes[i] = changes();
}
}
finally {
ensureAutoCommit(autoCommit);
}
reset(stmt);
return changes;
}
E.g., H2:
public int[] executeBatch() throws SQLException {
try {
debugCodeCall("executeBatch");
if (batchParameters == null) {
// Empty batch is allowed, see JDK-4639504 and other issues
batchParameters = Utils.newSmallArrayList();
}
batchIdentities = new MergedResult();
int size = batchParameters.size();
int[] result = new int[size];
SQLException first = null;
SQLException last = null;
checkClosedForWrite();
for (int i = 0; i < size; i++) {
Value[] set = batchParameters.get(i);
ArrayList<? extends ParameterInterface> parameters =
command.getParameters();
for (int j = 0; j < set.length; j++) {
Value value = set[j];
ParameterInterface param = parameters.get(j);
param.setValue(value, false);
}
try {
result[i] = executeUpdateInternal();
// Cannot use own implementation, it returns batch identities
ResultSet rs = super.getGeneratedKeys();
batchIdentities.add(((JdbcResultSet) rs).result);
} catch (Exception re) {
SQLException e = logAndConvert(re);
if (last == null) {
first = last = e;
} else {
last.setNextException(e);
}
result[i] = Statement.EXECUTE_FAILED;
}
}
batchParameters = null;
if (first != null) {
throw new JdbcBatchUpdateException(first, result);
}
return result;
} catch (Exception e) {
throw logAndConvert(e);
}
}
From the above code I see that there are multiple calls to the database, with each having its own result set. How does batch execution actually work?
I'm training a model using Google AutoML. I then exported a TFLite model to run in an Android app.
With the same image used in the app and in the browser (Google AutoML gives you a chance to test your model in the browser), I get a very accurate answer in the browser, but in my app I get a not-so-accurate answer.
Any thoughts on why this could be happening?
Relevant code:
public void predict(String imagePath, final Promise promise) {
imgData.order(ByteOrder.nativeOrder());
labelProbArray = new byte[1][RESULTS_TO_SHOW];
try {
labelList = loadLabelList();
} catch (Exception ex) {
ex.printStackTrace();
}
Log.w("FIND_ ", imagePath);
Bitmap bitmap = BitmapFactory.decodeFile(imagePath);
convertBitmapToByteBuffer(bitmap);
try {
tflite = new Interpreter(loadModelFile());
} catch (Exception ex) {
Log.w("FIND_exception in loading tflite", "1");
}
tflite.run(imgData, labelProbArray);
promise.resolve(getResult());
}
private WritableNativeArray getResult() {
WritableNativeArray result = new WritableNativeArray();
//ArrayList<JSONObject> result = new ArrayList<JSONObject>();
try {
for (int i = 0; i < RESULTS_TO_SHOW; i++) {
WritableNativeMap map = new WritableNativeMap();
map.putString("label", Integer.toString(i));
float output = (float)(labelProbArray[0][i] & 0xFF) / 255f;
map.putString("prob", String.valueOf(output));
result.pushMap(map);
Log.w("FIND_label ", Integer.toString(i));
Log.w("FIND_prob ", String.valueOf(labelProbArray[0][i]));
}
} catch (Exception ex) {
ex.printStackTrace();
}
return result;
}
private List<String> loadLabelList() throws IOException {
Activity activity = getCurrentActivity();
List<String> labelList = new ArrayList<String>();
BufferedReader reader =
new BufferedReader(new InputStreamReader(activity.getAssets().open(LABEL_PATH)));
String line;
while ((line = reader.readLine()) != null) {
labelList.add(line);
}
reader.close();
return labelList;
}
private void convertBitmapToByteBuffer(Bitmap bitmap) {
if (imgData == null) {
return;
}
imgData.rewind();
Log.w("FIND_bitmap width ", String.valueOf(bitmap.getWidth()));
Log.w("FIND_bitmap height ", String.valueOf(bitmap.getHeight()));
bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
// Convert the image to floating point.
int pixel = 0;
//long startTime = SystemClock.uptimeMillis();
for (int i = 0; i < DIM_IMG_SIZE_X; ++i) {
for (int j = 0; j < DIM_IMG_SIZE_Y; ++j) {
final int val = intValues[pixel++];
imgData.put((byte) ( ((val >> 16) & 0xFF)));
imgData.put((byte) ( ((val >> 8) & 0xFF)));
imgData.put((byte) ( (val & 0xFF)));
Log.w("FIND_i", String.valueOf(i));
Log.w("FIND_j", String.valueOf(j));
}
}
}
private MappedByteBuffer loadModelFile() throws IOException {
Activity activity = getCurrentActivity();
AssetFileDescriptor fileDescriptor = activity.getAssets().openFd(MODEL_PATH);
FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
FileChannel fileChannel = inputStream.getChannel();
long startOffset = fileDescriptor.getStartOffset();
long declaredLength = fileDescriptor.getDeclaredLength();
return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}
One of the things I'm not sure is correct is how I convert the output data into a probability. In the getResult() method, I write this:
float output = (float)(labelProbArray[0][i] & 0xFF) / 255f;
I divide by 255 in order to get a number between 0 and 1, but I don't know if this is correct.
The .tflite model is a quantized uint8 model if that helps.
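For what it's worth, the general dequantization formula for a quantized uint8 output is real = scale * (quantized - zero_point); dividing by 255 is only equivalent when the output tensor's scale is 1/255 and its zero point is 0 (common for softmax outputs, but worth checking against the model). A minimal sketch with assumed constants:
// Hypothetical helper: SCALE and ZERO_POINT are assumptions here and should be
// taken from the model's output tensor quantization parameters.
private static final float SCALE = 1f / 255f;
private static final int ZERO_POINT = 0;
private float dequantize(byte q) {
    // (q & 0xFF) widens the unsigned byte before applying the linear mapping
    return SCALE * ((q & 0xFF) - ZERO_POINT);
}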
My code involves both Processing and Arduino. Five different photocells trigger five different sounds; my sound files play only when the LDR value is above the threshold.
The NullPointerException is highlighted on this line:
for (int i = 0; i < ldrValues.length; i++) {
I am not sure which part of my code should be changed so that I can run it.
import processing.serial.*;
import processing.sound.*;
SoundFile[] soundFiles = new SoundFile[5];
Serial myPort; // Create object from Serial class
int[] ldrValues;
int[] thresholds = {440, 490, 330, 260, 450};
int i = 0;
boolean[] states = {false, false, false, false, false};
void setup() {
size(200, 200);
println((Object[])Serial.list());
String portName = Serial.list()[3];
myPort = new Serial(this, portName, 9600);
soundFiles[0] = new SoundFile(this, "1.mp3");
soundFiles[1] = new SoundFile(this, "2.mp3");
soundFiles[2] = new SoundFile(this, "3.mp3");
soundFiles[3] = new SoundFile(this, "4.mp3");
soundFiles[4] = new SoundFile(this, "5.mp3");
}
void draw()
{
background(255);
//serial loop
while (myPort.available() > 0) {
String myString = myPort.readStringUntil(10);
if (myString != null) {
//println(myString);
ldrValues = int(split(myString.trim(), ','));
//println(ldrValues);
}
}
for (int i = 0; i < ldrValues.length; i++) {
println(states[i]);
println(ldrValues[i]);
if (ldrValues[i] > thresholds[i] && !states[i]) {
println("sensor " + i + " is activated");
soundFiles[i].play();
states[i] = true;
}
if (ldrValues[i] < thresholds[i]) {
println("sensor " + i + " is NOT activated");
soundFiles[i].stop();
states[i] = false;
}
}
}
Your approach is, shall we say, optimistic? :)
It always assumes there was a message from Serial, that it was formatted the right way so it could be parsed, and that there were absolutely zero issues buffering data (incomplete strings, etc.).
The simplest thing you could do is check whether the parsing was successful; otherwise the ldrValues array would still be null:
void draw()
{
background(255);
//serial loop
String myString = null; // declared before the loop so the error message below can reference it
while (myPort.available() > 0) {
myString = myPort.readStringUntil(10);
if (myString != null) {
//println(myString);
ldrValues = int(split(myString.trim(), ','));
//println(ldrValues);
}
}
// double check parsing int values from the string was successfully as well, not just buffering the string
if(ldrValues != null){
for (int i = 0; i < ldrValues.length; i++) {
println(states[i]);
println(ldrValues[i]);
if (ldrValues[i] > thresholds[i] && !states[i]) {
println("sensor " + i + " is activated");
soundFiles[i].play();
states[i] = true;
}
if (ldrValues[i] < thresholds[i]) {
println("sensor " + i + " is NOT activated");
soundFiles[i].stop();
states[i] = false;
}
}
}else{
// print a helpful debugging message otherwise
println("error parsing ldrValues from string: " + myString);
}
}
(Didn't know you could parse an int[] with int(): nice!)
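As an extra hardening step beyond the null check (not part of the code above), Processing's Serial library can also buffer complete lines for you via bufferUntil() and serialEvent(), which avoids acting on the partial strings mentioned earlier. A rough sketch, reusing the same ldrValues, thresholds and myPort fields:
// in setup(), after opening the port:
myPort.bufferUntil('\n'); // serialEvent() will only fire once a full line has arrived
// called by Processing whenever a buffered line is available
void serialEvent(Serial p) {
  String line = p.readStringUntil('\n');
  if (line != null) {
    int[] parsed = int(split(line.trim(), ','));
    // only accept complete readings for all five sensors
    if (parsed != null && parsed.length == thresholds.length) {
      ldrValues = parsed;
    }
  }
}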
I am trying to work with payloads in Lucene 6, but I am having trouble. The idea is to index payloads and use them in a CustomScoreQuery to check if the payload of a query term matches the payload of the document term.
Here is my payload filter:
@Override
public final boolean incrementToken() throws IOException {
if (!this.input.incrementToken()) {
return false;
}
// get the current token
final char[] token = Arrays.copyOfRange(this.termAtt.buffer(), 0, this.termAtt.length());
String stoken = String.valueOf(token);
String[] parts = stoken.split(Constants.PAYLOAD_DELIMITER);
if (parts.length > 1 && parts.length == 2){
termAtt.setLength(parts[0].length());
// the rest is the payload
BytesRef br = new BytesRef(parts[1]);
System.out.println(br);
payloadAtt.setPayload(br);
}else if (parts.length > 1){
// skip
}else{
// no payload here
payloadAtt.setPayload(null);
}
return true;
}
It seems to be adding the payload, however when I try to access the payload in CustomScoreQuery it just keeps returning null.
public float determineBoost(int doc) throws IOException{
float boost = 1f;
LeafReader reader = this.context.reader();
System.out.println("Has payloads:" + reader.getFieldInfos().hasPayloads());
// loop through each location of the term and boost if location matches the payload
if (reader != null){
PostingsEnum posting = reader.postings(new Term(this.field, term.getTerm()), PostingsEnum.POSITIONS);
System.out.println("Term: " + term.getTerm());
if (posting != null){
// move to the document currently looking at
posting.advance(doc);
int count = 0;
while (count < posting.freq()){
BytesRef load = posting.getPayload();
System.out.println(posting);
System.out.println(posting.getClass());
System.out.println(posting.attributes());
System.out.println("Load: " + load);
// if the location matches in the term location than boos the term by the boost factor
try {
if(load != null && term.containLocation(new Payload(load))){
boost = boost * this.boost;
}
} catch (PayloadException e) {
// do not care too much, the payload is unrecognized
// this is not going to change the boost factor
}
posting.nextPosition();
count += 1;
}
}
}
return boost;
}
For my two tests it keeps stating the load is null. Any suggestions or help?
I am interfacing with a PostgreSQL database with NHibernate.
Background
I made some simple tests...it seems it's taking 2 seconds to persist 300 records.
I have a Perl program with identical functionality, but it issues direct SQL instead; it takes only 70% of the time.
I am not sure if this is expected. I thought C#/NHibernate would be faster or at least on par.
Questions
One of my observations is that (with show_sql turned on) NHibernate is issuing INSERTs a few hundred times, instead of doing a bulk INSERT that takes care of multiple rows. Note that I am assigning the primary key myself, not using the "native" generator.
Is that expected? Is there any way I could make it issue a bulk INSERT statement instead? It seems to me that this could be one of the areas where I could speed up the performance.
As stachu correctly found out: NHibernate does not have a *BatchingBatcher(Factory) for PostgreSQL (Npgsql).
As stachu asks: did anybody manage to force NHibernate to do batch inserts to PostgreSQL?
I wrote a Batcher that doesn't use any Npgsql batching stuff, but instead manipulates the SQL string "oldschool style" (INSERT INTO [..] VALUES (...),(...), ...)
using System;
using System.Collections;
using System.Data;
using System.Diagnostics;
using System.Text;
using Npgsql;
namespace NHibernate.AdoNet
{
public class PostgresClientBatchingBatcherFactory : IBatcherFactory
{
public virtual IBatcher CreateBatcher(ConnectionManager connectionManager, IInterceptor interceptor)
{
return new PostgresClientBatchingBatcher(connectionManager, interceptor);
}
}
/// <summary>
/// Summary description for PostgresClientBatchingBatcher.
/// </summary>
public class PostgresClientBatchingBatcher : AbstractBatcher
{
private int batchSize;
private int countOfCommands = 0;
private int totalExpectedRowsAffected;
private StringBuilder sbBatchCommand;
private int m_ParameterCounter;
private IDbCommand currentBatch;
public PostgresClientBatchingBatcher(ConnectionManager connectionManager, IInterceptor interceptor)
: base(connectionManager, interceptor)
{
batchSize = Factory.Settings.AdoBatchSize;
}
private string NextParam()
{
return ":p" + m_ParameterCounter++;
}
public override void AddToBatch(IExpectation expectation)
{
if(expectation.CanBeBatched && !(CurrentCommand.CommandText.StartsWith("INSERT INTO") && CurrentCommand.CommandText.Contains("VALUES")))
{
//NonBatching behavior
IDbCommand cmd = CurrentCommand;
LogCommand(CurrentCommand);
int rowCount = ExecuteNonQuery(cmd);
expectation.VerifyOutcomeNonBatched(rowCount, cmd);
currentBatch = null;
return;
}
totalExpectedRowsAffected += expectation.ExpectedRowCount;
log.Info("Adding to batch");
int len = CurrentCommand.CommandText.Length;
int idx = CurrentCommand.CommandText.IndexOf("VALUES");
int endidx = idx + "VALUES".Length + 2;
if (currentBatch == null)
{
// begin new batch.
currentBatch = new NpgsqlCommand();
sbBatchCommand = new StringBuilder();
m_ParameterCounter = 0;
string preCommand = CurrentCommand.CommandText.Substring(0, endidx);
sbBatchCommand.Append(preCommand);
}
else
{
//only append Values
sbBatchCommand.Append(", (");
}
//append values from CurrentCommand to sbBatchCommand
string values = CurrentCommand.CommandText.Substring(endidx, len - endidx - 1);
//get all values
string[] split = values.Split(',');
ArrayList paramName = new ArrayList(split.Length);
for (int i = 0; i < split.Length; i++ )
{
if (i != 0)
sbBatchCommand.Append(", ");
string param = null;
if (split[i].StartsWith(":")) //first named parameter
{
param = NextParam();
paramName.Add(param);
}
else if(split[i].StartsWith(" :")) //other named parameter
{
param = NextParam();
paramName.Add(param);
}
else if (split[i].StartsWith(" ")) //other fix parameter
{
param = split[i].Substring(1, split[i].Length-1);
}
else
{
param = split[i]; //first fix parameter
}
sbBatchCommand.Append(param);
}
sbBatchCommand.Append(")");
//rename & copy parameters from CurrentCommand to currentBatch
int iParam = 0;
foreach (NpgsqlParameter param in CurrentCommand.Parameters)
{
param.ParameterName = (string)paramName[iParam++];
NpgsqlParameter newParam = /*Clone()*/new NpgsqlParameter(param.ParameterName, param.NpgsqlDbType, param.Size, param.SourceColumn, param.Direction, param.IsNullable, param.Precision, param.Scale, param.SourceVersion, param.Value);
currentBatch.Parameters.Add(newParam);
}
countOfCommands++;
//check for flush
if (countOfCommands >= batchSize)
{
DoExecuteBatch(currentBatch);
}
}
protected override void DoExecuteBatch(IDbCommand ps)
{
if (currentBatch != null)
{
//Batch command now needs its terminator
sbBatchCommand.Append(";");
countOfCommands = 0;
log.Info("Executing batch");
CheckReaders();
//set prepared batchCommandText
string commandText = sbBatchCommand.ToString();
currentBatch.CommandText = commandText;
LogCommand(currentBatch);
Prepare(currentBatch);
int rowsAffected = 0;
try
{
rowsAffected = currentBatch.ExecuteNonQuery();
}
catch (Exception e)
{
if(Debugger.IsAttached)
Debugger.Break();
throw;
}
Expectations.VerifyOutcomeBatched(totalExpectedRowsAffected, rowsAffected);
totalExpectedRowsAffected = 0;
currentBatch = null;
sbBatchCommand = null;
m_ParameterCounter = 0;
}
}
protected override int CountOfStatementsInCurrentBatch
{
get { return countOfCommands; }
}
public override int BatchSize
{
get { return batchSize; }
set { batchSize = value; }
}
}
}
I also found that NHibernate is not doing batch inserts into PostgreSQL.
I identified two possible reasons:
1) The Npgsql driver does not support batch inserts/updates (see forum)
2) NHibernate does not have a *BatchingBatcher(Factory) for PostgreSQL (Npgsql). I tried using the Devart dotConnect driver with NHibernate (I wrote a custom driver for NHibernate), but it still did not work.
I suppose this driver should also implement the IEmbeddedBatcherFactoryProvider interface, but it seems non-trivial to me (using the one for Oracle did not work ;) )
Did anybody manage to force NHibernate to do batch inserts to PostgreSQL, or can you confirm my conclusion?