Does SQLite3 have prepared statements in Node.js? - sql

From the npm docs, the only visible prepared statements are for INSERT. Do these prepared statements also work for SELECT, UPDATE, and DELETE?
I tried it for SELECT, but there isn't a .each function where the rows are called back. Has anyone been able to do this, or have links to resources? I sure haven't been able to find any.

According to the node-sqlite3 API documentation, you can use parameters in your SQL queries in several different ways:
// Directly in the function arguments.
db.run("UPDATE tbl SET name = ? WHERE id = ?", "bar", 2);
// As an array.
db.run("UPDATE tbl SET name = ? WHERE id = ?", [ "bar", 2 ]);
// As an object with named parameters.
db.run("UPDATE tbl SET name = $name WHERE id = $id", {
$id: 2,
$name: "bar"
});
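The same placeholder styles work for SELECT as well; for example, a minimal sketch using db.all (all matching rows at once) or db.each (one callback per row), assuming a tbl table like the one in the examples above:
// All matching rows at once.
db.all("SELECT * FROM tbl WHERE name = ?", ["bar"], function(err, rows) {
  rows.forEach(function(row) {
    console.log(row.id, row.name);
  });
});
// One callback per row.
db.each("SELECT * FROM tbl WHERE name = ?", ["bar"], function(err, row) {
  console.log(row.id, row.name);
});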

Yes, prepared statements are supported.
With node-sqlite3:
var sqlite3 = require('sqlite3').verbose();
var db = new sqlite3.Database('data.db');
db.serialize(function() {
  var stmt = db.prepare("INSERT INTO users VALUES (?,?)");
  for (var i = 0; i < 10; i++) {
    stmt.run("user " + i, "email " + i);
  }
  stmt.finalize();

  stmt = db.prepare("SELECT * FROM users WHERE id=?");
  stmt.each(userId, function(err, row) {
    console.log(row.name, row.email);
  }, function(err, count) {
    stmt.finalize();
  });
});
With better-sqlite3:
var Database = require('better-sqlite3');
var db = new Database('foobar.db');

var stmt = db.prepare("INSERT INTO users VALUES (?,?)");
for (var i = 0; i < 10; i++) {
  stmt.run("user " + i, "email " + i);
}

stmt = db.prepare('SELECT * FROM users WHERE id=?');
var row = stmt.get(userId);
console.log(row.name, row.email);
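If the SELECT can match more than one row, better-sqlite3 also has .all() and .iterate(); a minimal sketch (assuming the same users table, with named parameters shown purely as an illustration):
var rows = db.prepare('SELECT * FROM users WHERE email = $email').all({ email: 'email 1' });
rows.forEach(function(row) {
  console.log(row.name, row.email);
});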

Related

How to correctly update a table?

I am trying to update a table.
I run a first query to get all rows, then I update the table with the same data, but with the passwords encrypted before the update.
'use strict';
var sql = require('./db.js');
var crypto = require('crypto');
var Crypt = require('./encryption.js');

//Support object constructor
var Login = function(login){
  this.login = login.login;
  this.password = login.password;
};

sql.query("SELECT * FROM login WHERE Password IS NOT NULL OR Password != ''", function(err, res) {
  if (err) {
    console.log(err);
  }
  else {
    console.log(res.length);
    for (var i = 0; i < res.length; i++) {
      //console.log(res[i].Password);
      //if (res[i].Password != '' ){
      sql.query("UPDATE login SET Password = ? where id=?", [Crypt.encrypt(res[i].Password), i], function(err, res2) {
        if (err){
          console.log(err);
        } else {
          console.log(res2, 'ok');
        }
      }); //}
    }
  }
});
When I run the update, only some rows are updated, not all of them. I want it to run on all rows.
The mistake was in the query: I was using the loop iterator as the id, but the ids aren't ordered in the database. It should have been res[i].id instead of i in the query parameter.
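In other words, the corrected query inside the loop above would be (a sketch of the fix just described):
sql.query("UPDATE login SET Password = ? where id=?", [Crypt.encrypt(res[i].Password), res[i].id], function(err, res2) {
  if (err) {
    console.log(err);
  } else {
    console.log(res2, 'ok');
  }
});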

how to input nodejs variable into oracle sql query

I am trying to pass a Node.js variable into a SQL query.
Below is my code:
var b = 101;
connection = await oracledb.getConnection({
  user          : dbConfig.user,
  password      : dbConfig.password,
  connectString : dbConfig.connectString
});
sql = 'SELECT * FROM mytab where id= b';
binds = {};
options = {
  outFormat: oracledb.OBJECT // query result format
};
result = await connection.execute(sql, binds, options);
Try something along these lines, using an Oracle bind variable instead of embedding the value in the string:
sql = 'SELECT * FROM mytab WHERE id = :id';
options = {
  outFormat: oracledb.OBJECT // query result format
};
result = await connection.execute(sql, [ b ], options);

BigQuery Result save as google sheets through api

In the Google BigQuery web UI, a query result screen is shown after executing a query, with a "Save as Google Sheets" button. I like this feature but would like to automate it: is there a function in the REST API that does this?
It doesn't seem like there is a straightforward way to do this directly with the BigQuery API. There are a few workarounds for this, though:
You can use the BigQuery API to query your data and then the Google Sheets API to upload it to Google Sheets (see the Node.js sketch after the Apps Script code below).
You can use Google Apps Script. If you go to this link and click on "New Script", you can run the code below and adapt it to your needs. You can also add a trigger to run the script every hour/minute …
Here is the code snippet from that link:
function runQuery() {
// Replace this value with the project ID listed in the Google
// Cloud Platform project.
var projectId = 'XXXXXXXX';
var request = {
query: 'SELECT TOP(word, 300) AS word, COUNT(*) AS word_count ' +
'FROM publicdata:samples.shakespeare WHERE LENGTH(word) > 10;'
};
var queryResults = BigQuery.Jobs.query(request, projectId);
var jobId = queryResults.jobReference.jobId;
// Check on status of the Query Job.
var sleepTimeMs = 500;
while (!queryResults.jobComplete) {
Utilities.sleep(sleepTimeMs);
sleepTimeMs *= 2;
queryResults = BigQuery.Jobs.getQueryResults(projectId, jobId);
}
// Get all the rows of results.
var rows = queryResults.rows;
while (queryResults.pageToken) {
queryResults = BigQuery.Jobs.getQueryResults(projectId, jobId, {
pageToken: queryResults.pageToken
});
rows = rows.concat(queryResults.rows);
}
if (rows) {
var spreadsheet = SpreadsheetApp.create('BigQuery Results');
var sheet = spreadsheet.getActiveSheet();
// Append the headers.
var headers = queryResults.schema.fields.map(function(field) {
return field.name;
});
sheet.appendRow(headers);
// Append the results.
var data = new Array(rows.length);
for (var i = 0; i < rows.length; i++) {
var cols = rows[i].f;
data[i] = new Array(cols.length);
for (var j = 0; j < cols.length; j++) {
data[i][j] = cols[j].v;
}
}
sheet.getRange(2, 1, rows.length, headers.length).setValues(data);
Logger.log('Results spreadsheet created: %s',
spreadsheet.getUrl());
} else {
Logger.log('No rows returned.');
}
}
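For the first workaround (querying with the BigQuery API and uploading with the Sheets API from Node.js), a minimal sketch might look like the following. It assumes the @google-cloud/bigquery and googleapis packages, application-default credentials with access to both APIs, and an existing spreadsheet whose id is passed in as spreadsheetId (all of these are assumptions, not part of the original answer):
const { BigQuery } = require('@google-cloud/bigquery');
const { google } = require('googleapis');

async function queryToSheet(spreadsheetId) {
  // Run the query and collect the rows.
  const bigquery = new BigQuery();
  const [rows] = await bigquery.query({
    query: 'SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare` LIMIT 100'
  });

  // Turn the row objects into a 2D array with a header row.
  const headers = Object.keys(rows[0]);
  const values = [headers].concat(rows.map(row => headers.map(h => row[h])));

  // Write the values into the existing sheet.
  const auth = await google.auth.getClient({
    scopes: ['https://www.googleapis.com/auth/spreadsheets']
  });
  const sheets = google.sheets({ version: 'v4', auth });
  await sheets.spreadsheets.values.update({
    spreadsheetId,
    range: 'Sheet1!A1',
    valueInputOption: 'RAW',
    requestBody: { values }
  });
}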

AdWord Script Export to BigQuery "Empty Response"

Utilizing the following AdWords Script to export to BigQuery, the BigQuery.Jobs.insert call is causing the script to terminate due to an "Empty response" error. Any idea why the call is not getting a response?
var ACCOUNTS = ['xxx','xxx'];
var CONFIG = {
BIGQUERY_PROJECT_ID: 'xxx',
BIGQUERY_DATASET_ID: 'xxx',
// Truncate existing data, otherwise will append.
TRUNCATE_EXISTING_DATASET: true,
TRUNCATE_EXISTING_TABLES: true,
// Back up reports to Google Drive.
WRITE_DATA_TO_DRIVE: false,
// Folder to put all the intermediate files.
DRIVE_FOLDER: 'Adwords Big Query Test',
// Default date range over which statistics fields are retrieved.
DEFAULT_DATE_RANGE: '20140101,20140105',
// Lists of reports and fields to retrieve from AdWords.
REPORTS: [{NAME: 'KEYWORDS_PERFORMANCE_REPORT',
CONDITIONS: 'WHERE Impressions>0',
FIELDS: {'AccountDescriptiveName' : 'STRING',
'Date' : 'STRING',
'CampaignId' : 'STRING',
'CampaignName' : 'STRING',
'AdGroupId' : 'STRING',
'AdGroupName' : 'STRING',
'Id' : 'STRING',
'Criteria' : 'STRING',
'KeywordMatchType' : 'STRING',
'AdNetworkType1' : 'STRING',
'AdNetworkType2' : 'STRING',
'Device' : 'STRING',
'AveragePosition' : 'STRING',
'QualityScore' : 'STRING',
'CpcBid' : 'STRING',
'TopOfPageCpc' : 'STRING',
'Impressions' : 'STRING',
'Clicks' : 'STRING',
'ConvertedClicks' : 'STRING',
'Cost' : 'STRING',
'Conversions' : 'STRING'
}
}],
RECIPIENT_EMAILS: [
'xxx',
]
};
function main() {
createDataset();
for (var i = 0; i < CONFIG.REPORTS.length; i++) {
var reportConfig = CONFIG.REPORTS[i];
createTable(reportConfig);
}
folder = getDriveFolder();
// Get an account iterator.
var accountIterator = MccApp.accounts().withIds(ACCOUNTS).withLimit(10).get();
var jobIdMap = {};
while (accountIterator.hasNext()) {
// Get the current account.
var account = accountIterator.next();
// Select the child account.
MccApp.select(account);
// Run reports against child account.
var accountJobIds = processReports(folder, account.getCustomerId());
jobIdMap[account.getCustomerId()] = accountJobIds;
}
waitTillJobsComplete(jobIdMap);
sendEmail(jobIdMap);
}
function createDataset() {
if (datasetExists()) {
if (CONFIG.TRUNCATE_EXISTING_DATASET) {
BigQuery.Datasets.remove(CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID, {'deleteContents' : true});
Logger.log('Truncated dataset.');
} else {
Logger.log('Dataset %s already exists. Will not recreate.',
CONFIG.BIGQUERY_DATASET_ID);
return;
}
}
// Create new dataset.
var dataSet = BigQuery.newDataset();
dataSet.friendlyName = CONFIG.BIGQUERY_DATASET_ID;
dataSet.datasetReference = BigQuery.newDatasetReference();
dataSet.datasetReference.projectId = CONFIG.BIGQUERY_PROJECT_ID;
dataSet.datasetReference.datasetId = CONFIG.BIGQUERY_DATASET_ID;
dataSet = BigQuery.Datasets.insert(dataSet, CONFIG.BIGQUERY_PROJECT_ID);
Logger.log('Created dataset with id %s.', dataSet.id);
}
/**
 * Checks if dataset already exists in project.
 *
 * @return {boolean} Returns true if dataset already exists.
 */
function datasetExists() {
// Get a list of all datasets in project.
var datasets = BigQuery.Datasets.list(CONFIG.BIGQUERY_PROJECT_ID);
var datasetExists = false;
// Iterate through each dataset and check for an id match.
if (datasets.datasets != null) {
for (var i = 0; i < datasets.datasets.length; i++) {
var dataset = datasets.datasets[i];
if (dataset.datasetReference.datasetId == CONFIG.BIGQUERY_DATASET_ID) {
datasetExists = true;
break;
}
}
}
return datasetExists;
}
function createTable(reportConfig) {
if (tableExists(reportConfig.NAME)) {
if (CONFIG.TRUNCATE_EXISTING_TABLES) {
BigQuery.Tables.remove(CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID, reportConfig.NAME);
Logger.log('Truncated dataset %s.', reportConfig.NAME);
} else {
Logger.log('Table %s already exists. Will not recreate.',
reportConfig.NAME);
return;
}
}
// Create new table.
var table = BigQuery.newTable();
var schema = BigQuery.newTableSchema();
var bigQueryFields = [];
// Add account column to table.
var accountFieldSchema = BigQuery.newTableFieldSchema();
accountFieldSchema.description = 'AccountId';
accountFieldSchema.name = 'AccountId';
accountFieldSchema.type = 'STRING';
bigQueryFields.push(accountFieldSchema);
// Add each field to table schema.
var fieldNames = Object.keys(reportConfig.FIELDS);
for (var i = 0; i < fieldNames.length; i++) {
var fieldName = fieldNames[i];
var bigQueryFieldSchema = BigQuery.newTableFieldSchema();
bigQueryFieldSchema.description = fieldName;
bigQueryFieldSchema.name = fieldName;
bigQueryFieldSchema.type = reportConfig.FIELDS[fieldName];
bigQueryFields.push(bigQueryFieldSchema);
}
schema.fields = bigQueryFields;
table.schema = schema;
table.friendlyName = reportConfig.NAME;
table.tableReference = BigQuery.newTableReference();
table.tableReference.datasetId = CONFIG.BIGQUERY_DATASET_ID;
table.tableReference.projectId = CONFIG.BIGQUERY_PROJECT_ID;
table.tableReference.tableId = reportConfig.NAME;
table = BigQuery.Tables.insert(table, CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID);
Logger.log('Created table with id %s.', table.id);
}
function tableExists(tableId) {
// Get a list of all tables in the dataset.
var tables = BigQuery.Tables.list(CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID);
var tableExists = false;
// Iterate through each table and check for an id match.
if (tables.tables != null) {
for (var i = 0; i < tables.tables.length; i++) {
var table = tables.tables[i];
if (table.tableReference.tableId == tableId) {
tableExists = true;
break;
}
}
}
return tableExists;
}
function processReports(folder, accountId) {
var jobIds = [];
// Iterate over each report type.
for (var i = 0; i < CONFIG.REPORTS.length; i++) {
var reportConfig = CONFIG.REPORTS[i];
Logger.log('Running report %s for account %s', reportConfig.NAME,
accountId);
// Get data as csv
var csvData = retrieveAdwordsReport(reportConfig, accountId);
// If configured, back up data.
if (CONFIG.WRITE_DATA_TO_DRIVE) {
var fileName = reportConfig.NAME + '_' + accountId;
folder.createFile(fileName, csvData, MimeType.CSV);
Logger.log('Exported data to Drive folder ' +
CONFIG.DRIVE_FOLDER + ' for report ' + fileName);
}
// Convert to Blob format.
var blobData = Utilities.newBlob(csvData, 'application/octet-stream');
// Load data
var jobId = loadDataToBigquery(reportConfig, blobData);
jobIds.push(jobId);
}
return jobIds;
}
function retrieveAdwordsReport(reportConfig, accountId) {
var fieldNames = Object.keys(reportConfig.FIELDS);
var report = AdWordsApp.report(
'SELECT ' + fieldNames.join(',') +
' FROM ' + reportConfig.NAME + ' ' + reportConfig.CONDITIONS +
' DURING ' + CONFIG.DEFAULT_DATE_RANGE);
var rows = report.rows();
var csvRows = [];
// Header row
csvRows.push('AccountId,'+fieldNames.join(','));
// Iterate over each row.
while (rows.hasNext()) {
var row = rows.next();
var csvRow = [];
csvRow.push(accountId);
for (var i = 0; i < fieldNames.length; i++) {
var fieldName = fieldNames[i];
var fieldValue = row[fieldName].toString();
var fieldType = reportConfig.FIELDS[fieldName];
/* Strip off % and perform any other formatting here.
if ((fieldType == 'FLOAT' || fieldType == 'INTEGER') &&
fieldValue.charAt(fieldValue.length - 1) == '%') {
fieldValue = fieldValue.substring(0, fieldValue.length - 1);
}*/
// Add double quotes to any string values.
if (fieldType == 'STRING') {
fieldValue = fieldValue.replace(',', ''); //Handle fields with comma in value returned
fieldValue = fieldValue.replace('"', ''); //Handle fields with double quotes in value returned
fieldValue = fieldValue.replace('+', ''); //Handle fields with "+" in value returned
fieldValue = '"' + fieldValue + '"';
}
csvRow.push(fieldValue);
}
csvRows.push(csvRow.join(','));
}
Logger.log('Downloaded ' + reportConfig.NAME + ' for account ' + accountId +
' with ' + csvRows.length + ' rows.');
return csvRows.join('\n');
}
function getDriveFolder() {
var folders = DriveApp.getFoldersByName(CONFIG.DRIVE_FOLDER);
// Assume first folder is the correct one.
if (folders.hasNext()) {
Logger.log('Folder name found. Using existing folder.');
return folders.next();
}
return DriveApp.createFolder(CONFIG.DRIVE_FOLDER);
}
function loadDataToBigquery(reportConfig, data) {
function guid() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return s4() + s4() + s4() + s4() + s4() + s4() + s4() + s4();
}
var makeId = guid();
var job = {
jobReference: {
jobId: makeId
},
configuration: {
load: {
destinationTable: {
projectId: CONFIG.BIGQUERY_PROJECT_ID,
datasetId: CONFIG.BIGQUERY_DATASET_ID,
tableId: reportConfig.NAME
},
skipLeadingRows: 1,
ignoreUnknownValues: true,
allowJaggedRows: true,
allowLargeResults: true
}
}
};
var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Logger.log('Load job started for %s. Check on the status of it here: ' +
'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
CONFIG.BIGQUERY_PROJECT_ID);
return job.jobReference.jobId;
}
function waitTillJobsComplete(jobIdMap) {
var complete = false;
var remainingJobs = [];
var accountIds = Object.keys(jobIdMap);
for (var i = 0; i < accountIds.length; i++){
var accountJobIds = jobIdMap[accountIds[i]];
remainingJobs.push.apply(remainingJobs, accountJobIds);
}
while (!complete) {
if (AdWordsApp.getExecutionInfo().getRemainingTime() < 5){
Logger.log('Script is about to timeout, jobs ' + remainingJobs.join(',') +
' are still incomplete.');
}
remainingJobs = getIncompleteJobs(remainingJobs);
if (remainingJobs.length == 0) {
complete = true;
}
if (!complete) {
Logger.log(remainingJobs.length + ' jobs still being processed.');
// Wait 5 seconds before checking status again.
Utilities.sleep(5000);
}
}
Logger.log('All jobs processed.');
}
function getIncompleteJobs(jobIds) {
var remainingJobIds = [];
for (var i = 0; i < jobIds.length; i++) {
var jobId = jobIds[i];
var getJob = BigQuery.Jobs.get(CONFIG.BIGQUERY_PROJECT_ID, jobId);
if (getJob.status.state != 'DONE') {
remainingJobIds.push(jobId);
}
}
return remainingJobIds;
}
It appears the "Empty Response" error is being thrown on:
var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
I have tried quite a few tweaks, but the answer doesn't appear to be obvious to me. Thanks for any help!
I could be wrong, but I think the problem was with the jobId, due to an issue with the guid() function: a missing "+" sign.
function guid() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return s4() + s4() + s4() + s4() + s4() s4() + s4() + s4();
}
Why not use the jobId from the response, like below?
var job = {
configuration: {
load: {
destinationTable: {
projectId: CONFIG.BIGQUERY_PROJECT_ID,
datasetId: CONFIG.BIGQUERY_DATASET_ID,
tableId: reportConfig.NAME
},
skipLeadingRows: 1,
ignoreUnknownValues: true,
allowJaggedRows: true,
allowLargeResults: true
}
}
};
var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Logger.log('Load job started for %s. Check on the status of it here: ' +
'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
CONFIG.BIGQUERY_PROJECT_ID);
return insertJob.jobReference.jobId;
Added
In this case I would suggest logging the jobId (makeId = guid()) and checking the job status via this link: https://cloud.google.com/bigquery/docs/reference/v2/jobs/get#try-it
Enter the ProjectId and JobId, and you will at least see what is going on with your job!
AdWords places a "--" in for null values. If you define your report fields as anything but STRING (e.g., FLOAT, INTEGER, etc.), the insert will fail because BigQuery can't convert "--" to a float or integer.
Try setting all of your fields to STRING and see if that solves the problem.
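If you do want to keep numeric field types, one option (a sketch, adapting the commented-out formatting block already in retrieveAdwordsReport) is to map the "--" marker to an empty value while building the CSV, inside the per-field loop:
if ((fieldType == 'FLOAT' || fieldType == 'INTEGER') && fieldValue == '--') {
  fieldValue = ''; // replace the AdWords null marker so BigQuery doesn't fail the conversion
}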
Have you tried setting the WRITE_DATA_TO_DRIVE parameter to true to confirm that the report export is successful? How large is the result? I get the same error when attempting an insert greater than 10MB (~25k rows depending on columns). If the file export to Google Drive looks good, you can add a condition to the while loop in retrieveAdwordsReport to limit the file size. There was also a post on https://groups.google.com/forum/#!forum/adwords-scripts mentioning an issue when including AdNetworkType columns: https://groups.google.com/forum/#!searchin/adwords-scripts/adnetworktype2%7Csort:relevance/adwords-scripts/yK57JHCt3Cw/Cl1SjFaQBQAJ.
Limit result size:
var processedRows = 0;
// Iterate over each row.
while (rows.hasNext() && ++processedRows < 5000) {
var row = rows.next();
var csvRow = [];
csvRow.push(accountId);
if (processedRows % 1000 == 0)
{
Logger.log('Processed %s rows.',processedRows);
}
...

SQL Error 42S22 when updating multiple records using query from server script in Azure Mobile Services

I'm currently running into problems while trying to customize the update script for a table of User Stories on Azure Mobile Services. My intention is to have the update script receive an item that contains an array of UserStory objects, construct a SQL string using that array, and then use mssql.query with that string against the UserStory table to update the individual records.
The following SQL achieves what I'm looking to do and works correctly when executed in Visual Studio:
UPDATE
masterstorylist.UserStory
SET
UserStory.relativepriority =
CASE UserStory.id
WHEN 'C36DC45B-170B-49F4-A747-6F4D989C1859' THEN '24'
WHEN '7EC413C3-17A8-410A-A394-ABF334364226' THEN '25'
WHEN '99890AFE-13C2-4E1A-8376-B501CB07080D' THEN '26'
END
Here's the server script that I have created in an attempt to achieve the same result:
function update(item, user, request) {
if(item.stories.length > 0){
var sql = "UPDATE masterstorylist.UserStory SET UserStory.relativepriority = CASE UserStory.id ";
for(var i = 0; i < item.stories.length; ++i){
sql+= ("WHEN '" + item.stories[i].id + "' THEN " + item.stories[i].relativepriority + " ");
}
sql+="END";
mssql.query(sql, {
success: function(results) {
request.respond();
},
error: function(err) {
request.respond(err);
}
});
}
else{
request.respond(statusCodes.NO_CONTENT, 'No records specified for update in request.');
}
}
The error I get back is
"sqlstate":"42S22","code:207"
which I think means that SQL can't find the relativepriority or id column. I've tried different syntax, such as qualifying the column names more or less or using [] around the columns, but the result is always the same.
I'm not sure what else to try, and details around creating and executing queries with the mssql object are hard to come by. I've been working off the examples here and here.
What am I missing?
EDIT: In case it helps, I reworked the code to see if using mssql.open would help. I modeled it after the examples from the "MS Drivers for Node.js for SQL Server guide" (which I can't link to because I have low rep). The net result is the exact same error :/ Here's the new code in case it gives folks any ideas:
function update(item, user, request) {
if(item.stories.length > 0){
var sql = "UPDATE UserStory SET relativepriority = CASE id ";
for(var i = 0; i < item.stories.length; ++i){
sql+= ("WHEN '" + item.stories[i].id + "' THEN " + item.stories[i].relativepriority + " ");
}
sql+="END ";
console.log("opening connection...");
mssql.open({
success: function(connection){
console.log("mssql.open success");
console.log("executing query...");
connection.query(sql, function(err,results){
if(err){
console.log("query failed");
request.respond(err)
}
console.log("query successful");
request.respond();
});
},
error: function(err) {
console.log("fail on open: " + err);
request.respond(err);
}
});
}
else{
request.respond(statusCodes.OK, 'No records specified for update in request.');
}
}
P.S. This is my first post on Stack Overflow! :)
Ok, I figured out the answer to my own question. It turns out that the JSON object the Mobile Services SDK was passing up wasn't formatted to the liking of Node.js. The item object coming into the script had an array of objects in it (item.stories), which looked OK when logged to the console with console.log(item.stories), but apparently wasn't formatted well enough for me to access the individual objects in the item.stories array using array notation.
I was able to fix both of the scripts above by adding the line var storiesToUpdate = JSON.parse(item.stories); and then using storiesToUpdate[i] instead of item.stories[i]. That seems to have done the trick. Ideally I'll find a way to fix the JSON generated on my clients so that I don't need this extra JSON.parse.
Here are three now-working examples of how to update multiple records at once.
The simplest way to do what I wanted:
var storiesToUpdate;
var returnItem;
function update(item, user, request) {
storiesToUpdate = JSON.parse(item.stories);
returnItem = item;
if(storiesToUpdate.length > 0){
var sql = "UPDATE UserStory SET relativepriority = CASE id ";
for(var i = 0; i < storiesToUpdate.length; ++i){
sql+= ("WHEN '" + storiesToUpdate[i].id + "' THEN " + storiesToUpdate[i].relativepriority + " ");
}
sql+="END ";
mssql.query(sql,{
success: function(connection){
request.respond(statusCodes.OK, returnItem);
},
error: function(err) {
console.log("fail on open: " + err);
request.respond(err);
}
});
}
else{
request.respond(statusCodes.OK, returnItem);
}
}
Another way using mssql.open as well (not sure why you'd ever want to do this...):
var storiesToUpdate;
var returnItem;
function update(item, user, request) {
storiesToUpdate = JSON.parse(item.stories);
returnItem = item;
if(storiesToUpdate.length > 0){
var sql = "UPDATE UserStory SET relativepriority = CASE id ";
for(var i = 0; i < storiesToUpdate.length; ++i){
sql+= ("WHEN '" + storiesToUpdate[i].id + "' THEN " + storiesToUpdate[i].relativepriority + " ");
}
sql+="END ";
console.log("opening connection...");
mssql.open({
success: function(connection){
console.log("mssql.open success");
console.log("executing query...");
connection.query(sql, function(err,results){
if(err){
console.log("query failed");
request.respond(err)
}
console.log("query successful");
request.respond(statusCodes.OK, returnItem);
//request.respond(statusCodes.OK);
});
},
error: function(err) {
console.log("fail on open: " + err);
request.respond(err);
}
});
}
else{
request.respond(statusCodes.OK, returnItem);
}
}
And lastly, here's how to update multiple records without using mssql, using the recommended batching techniques instead (this is rough and probably needs to be cleaned up):
var UserStoryTable = tables.getTable('UserStory');
var batchSize = 10;
var startIndex = 0;
var endIndex = 0;
var totalCount = 0;
var errorCount = 0;
var g_item;
var g_request;
var storiesToUpdate;
function update(item, user, request) {
//the json array has to be parsed first
storiesToUpdate = JSON.parse(item.stories);
g_item = item;
g_request = request;
if(item.stories.length > 0){
updateItems();
}
else{
console.log("empy update request");
request.respond(statusCodes.OK);
}
}
function updateItems(){
var batchCompletedCount = 0;
var updateComplete = function() {
batchCompletedCount++;
totalCount++;
if(batchCompletedCount === batchSize || totalCount === storiesToUpdate.length) {
if(totalCount < storiesToUpdate.length) {
// kick off the next batch
updateItems();
} else {
// or we are done, report the status of the job
// to the log and don't do any more processing
console.log("Update complete. %d Records processed. There were %d errors.", totalCount, errorCount);
g_request.respond(statusCodes.OK);
}
}
};
var errorHandler = function(err) {
errorCount++;
console.warn("Ignoring insert failure as part of batch.", err);
updateComplete();
};
var startIndex = totalCount;
var endIndex = totalCount + batchSize - 1;
if(endIndex >= storiesToUpdate.length) endIndex = storiesToUpdate.length - 1;
for(var i = startIndex; i <= endIndex; i++) {
console.log("Updating: " + storiesToUpdate[totalCount]);
UserStoryTable.update(storiesToUpdate[i],{
success: updateComplete,
error: errorHandler
});
}
}