I'm trying to get the exchanges in Ripple, and I found this Data API, and it's working. But I want to use the Ripple WebSocket tool for certain reasons. Is there any WebSocket equivalent for this Data API?
I think there is an equivalent if you use the "tx_history" command on the WebSocket, but I'm sorry to tell you that its JSON result is not equal to your specific Data API result.
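For reference, a minimal sketch of issuing that command against a public rippled WebSocket endpoint (assuming the ws npm package and the public server wss://s1.ripple.com):
var WebSocket = require('ws'); // npm install ws
var socket = new WebSocket('wss://s1.ripple.com');
socket.on('open', function () {
  socket.send(JSON.stringify({
    id: 1,
    command: 'tx_history',
    start: 0 // index of the first historical transaction to return
  }));
});
socket.on('message', function (data) {
  var resp = JSON.parse(data);
  // resp.result.txs holds raw transactions, not the Data API's
  // exchange-shaped rows, so you would have to reshape them yourself.
  console.log(resp);
});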
The Ripple Data API v2 is served over plain HTTP (AJAX). See their result JSON formatter for exchanges:
} else if (resp.rows.length) {
resp.rows[0] = {
base_currency: resp.rows[0].base_currency,
base_issuer: resp.rows[0].base_issuer,
base_amount: resp.rows[0].base_amount,
counter_amount: resp.rows[0].counter_amount,
counter_currency: resp.rows[0].counter_currency,
counter_issuer: resp.rows[0].counter_issuer,
rate: resp.rows[0].rate,
executed_time: resp.rows[0].executed_time,
ledger_index: resp.rows[0].ledger_index,
buyer: resp.rows[0].buyer,
seller: resp.rows[0].seller,
taker: resp.rows[0].taker,
provider: resp.rows[0].provider,
autobridged_currency: resp.rows[0].autobridged_currency,
autobridged_issuer: resp.rows[0].autobridged_issuer,
offer_sequence: resp.rows[0].offer_sequence,
tx_type: resp.rows[0].tx_type,
tx_index: resp.rows[0].tx_index,
node_index: resp.rows[0].node_index,
tx_hash: resp.rows[0].tx_hash
};
}
res.csv(resp.rows, filename);
} else {
res.json({
result: 'success',
count: resp.rows.length,
marker: resp.marker,
exchanges: resp.rows
});
}
}
and it can only be accessed via a GET URL:
route: '/v2/exchanges/{:base}/{:counter}'
which is bound in their server.js:
app.get('/v2/exchanges/:base/:counter', routes.getExchanges);
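For illustration, a hypothetical call to that route on the public Data API host (data.ripple.com assumed; the USD issuer below is Bitstamp's address, used only as an example):
var https = require('https');
var url = 'https://data.ripple.com/v2/exchanges/XRP/' +
    'USD+rvYAfWj5gh67oV6fW32ZzP3Aw4Eubs59B?limit=1';
https.get(url, function (res) {
  var body = '';
  res.on('data', function (chunk) { body += chunk; });
  res.on('end', function () {
    // same shape as the formatter above: { result, count, marker, exchanges }
    console.log(JSON.parse(body).exchanges);
  });
});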
And one last hint: this is their database query, using HBase:
HbaseClient.getExchanges = function (options, callback) {
var base = options.base.currency + '|' + (options.base.issuer || '');
var counter = options.counter.currency + '|' + (options.counter.issuer || '');
var table;
var keyBase;
var startRow;
var endRow;
var descending;
var columns;
if (counter.toLowerCase() > base.toLowerCase()) {
keyBase = base + '|' + counter;
} else {
keyBase = counter + '|' + base;
options.invert = true;
}
if (!options.interval) {
table = 'exchanges';
descending = options.descending ? true : false;
options.unreduced = true;
//only need certain columns
if (options.reduce) {
columns = [
'd:base_amount',
'd:counter_amount',
'd:rate',
'f:executed_time',
'f:buyer',
'f:seller',
'f:taker'
];
}
} else if (exchangeIntervals.indexOf(options.interval) !== -1) {
keyBase = options.interval + '|' + keyBase;
descending = options.descending ? true : false;
table = 'agg_exchanges';
} else {
callback('invalid interval: ' + options.interval);
return;
}
startRow = keyBase + '|' + options.start.hbaseFormatStartRow();
endRow = keyBase + '|' + options.end.hbaseFormatStopRow();
if (options.autobridged) {
options.filterstring = "DependentColumnFilter('f', 'autobridged_currency')";
if (columns) {
columns.push('f:autobridged_currency');
}
}
this.getScanWithMarker(this, {
table: table,
startRow: startRow,
stopRow: endRow,
marker: options.marker,
limit: options.limit,
descending: descending,
columns: columns,
filterString: options.filterstring
}, function (err, resp) {
if (!resp) {
resp = {rows: []};
}
if (!resp.rows) {
resp.rows = [];
}
if (options.reduce && options.unreduced) {
if (descending) {
resp.rows.reverse();
}
resp.reduced = reduce(resp.rows);
} else if (table === 'exchanges') {
resp.rows = formatExchanges(resp.rows);
} else {
resp.rows = formatAggregates(resp.rows);
}
callback(err, resp);
});
/**
 * formatExchanges
 */
function formatExchanges (rows) {
rows.forEach(function(row) {
var key = row.rowkey.split('|');
delete row.base_issuer;
delete row.base_currency;
delete row.counter_issuer;
delete row.counter_currency;
row.base_amount = parseFloat(row.base_amount);
row.counter_amount = parseFloat(row.counter_amount);
row.rate = parseFloat(row.rate);
row.offer_sequence = Number(row.offer_sequence || 0);
row.ledger_index = Number(row.ledger_index);
row.tx_index = Number(key[6]);
row.node_index = Number(key[7]);
row.time = utils.unformatTime(key[4]).unix();
});
if (options.invert) {
rows = invertPair(rows);
}
return rows;
}
/**
 * formatAggregates
 */
function formatAggregates (rows) {
rows.forEach(function(row) {
var key = row.rowkey.split('|');
row.base_volume = parseFloat(row.base_volume);
row.counter_volume = parseFloat(row.counter_volume);
row.buy_volume = parseFloat(row.buy_volume);
row.count = Number(row.count);
row.open = parseFloat(row.open);
row.high = parseFloat(row.high);
row.low = parseFloat(row.low);
row.close = parseFloat(row.close);
row.vwap = parseFloat(row.vwap);
row.close_time = Number(row.close_time);
row.open_time = Number(row.open_time);
});
if (options.invert) {
rows = invertPair(rows);
}
return rows;
}
/**
 * if the base/counter key was inverted, we need to swap
 * some of the values in the results
 */
function invertPair (rows) {
var swap;
var i;
if (options.unreduced) {
for (i=0; i<rows.length; i++) {
rows[i].rate = 1/rows[i].rate;
//swap base and counter vol
swap = rows[i].base_amount;
rows[i].base_amount = rows[i].counter_amount;
rows[i].counter_amount = swap;
//swap buyer and seller
swap = rows[i].buyer;
rows[i].buyer = rows[i].seller;
rows[i].seller = swap;
}
} else {
for (i=0; i<rows.length; i++) {
//swap base and counter vol
swap = rows[i].base_volume;
rows[i].base_volume = rows[i].counter_volume;
rows[i].counter_volume = swap;
//swap high and low
swap = 1/rows[i].high;
rows[i].high = 1/rows[i].low;
rows[i].low = swap;
//invert open, close, vwap
rows[i].open = 1/rows[i].open;
rows[i].close = 1/rows[i].close;
rows[i].vwap = 1/rows[i].vwap;
//invert buy_volume
rows[i].buy_volume /= rows[i].vwap;
}
}
return rows;
}
/**
 * reduce
 * reduce all rows
 */
function reduce (rows) {
var buyVolume = 0;
var reduced = {
open: 0,
high: 0,
low: Infinity,
close: 0,
base_volume: 0,
counter_volume: 0,
buy_volume: 0,
count: 0,
open_time: 0,
close_time: 0
};
rows = formatExchanges(rows);
// filter out small XRP amounts
rows = rows.filter(function(row) {
if (options.base.currency === 'XRP' && row.base_amount < 0.0005) {
return false;
} else if (options.counter.currency === 'XRP' && row.counter_amount < 0.0005) {
return false;
} else {
return true;
}
});
if (rows.length) {
reduced.open_time = moment.unix(rows[0].time).utc().format();
reduced.close_time = moment.unix(rows[rows.length-1].time).utc().format();
reduced.open = rows[0].rate;
reduced.close = rows[rows.length -1].rate;
reduced.count = rows.length;
} else {
reduced.low = 0;
return reduced;
}
rows.forEach(function(row) {
reduced.base_volume += row.base_amount;
reduced.counter_volume += row.counter_amount;
if (row.rate < reduced.low) reduced.low = row.rate;
if (row.rate > reduced.high) reduced.high = row.rate;
if (row.buyer === row.taker) {
reduced.buy_volume += row.base_amount;
}
});
reduced.vwap = reduced.counter_volume / reduced.base_volume;
return reduced;
}
};
Maybe you should make a custom WebSocket server that wraps your HTTP RPC call and upgrades it from HTTP/1.1 to the WebSocket protocol (ws).
In Node.js you can do it simply:
// for http
var http = require('http');
// for websocket
var ws = require("nodejs-websocket");
// the RPC request body (example payload; adjust to the call you need)
var post_data = 'method=tx_history&start=0';
var options = {
  host: 'URL-RPC-HERE',
  port: '80',
  method: 'POST',
  headers: {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Content-Length': post_data.length
  }
};
var req = http.request(options, function(res) {
  // collect the response body, which is the <res>
  var body = '';
  res.on('data', function(chunk) { body += chunk; });
  res.on('end', function() {
    // once the full response has arrived we can hand it to the ws server
    upToWebsocket(body);
  });
});
req.write(post_data);
req.end();
// Upgrade to websocket
var upToWebsocket = function(json) {
  var server = ws.createServer(function (conn) {
    // nodejs-websocket emits "text" for incoming string frames
    conn.on("text", function (str) {
      conn.sendText(str.toUpperCase() + "!!!");
    });
    conn.on("close", function (code, reason) {
      console.log("Connection closed");
    });
  }).listen(8001);
};
Also, if you have rippled running on a server, that does not help here, because rippled has no RPC or WebSocket command that matches the exchanges Data API.
Utilizing the following AdWords Script to export to BigQuery, the BigQuery.Jobs.insert call is causing the script to terminate due to "Empty response". Any reason the call is not getting a response?
var ACCOUNTS = ['xxx','xxx'];
var CONFIG = {
BIGQUERY_PROJECT_ID: 'xxx',
BIGQUERY_DATASET_ID: 'xxx',
// Truncate existing data, otherwise will append.
TRUNCATE_EXISTING_DATASET: true,
TRUNCATE_EXISTING_TABLES: true,
// Back up reports to Google Drive.
WRITE_DATA_TO_DRIVE: false,
// Folder to put all the intermediate files.
DRIVE_FOLDER: 'Adwords Big Query Test',
// Default date range over which statistics fields are retrieved.
DEFAULT_DATE_RANGE: '20140101,20140105',
// Lists of reports and fields to retrieve from AdWords.
REPORTS: [{NAME: 'KEYWORDS_PERFORMANCE_REPORT',
CONDITIONS: 'WHERE Impressions>0',
FIELDS: {'AccountDescriptiveName' : 'STRING',
'Date' : 'STRING',
'CampaignId' : 'STRING',
'CampaignName' : 'STRING',
'AdGroupId' : 'STRING',
'AdGroupName' : 'STRING',
'Id' : 'STRING',
'Criteria' : 'STRING',
'KeywordMatchType' : 'STRING',
'AdNetworkType1' : 'STRING',
'AdNetworkType2' : 'STRING',
'Device' : 'STRING',
'AveragePosition' : 'STRING',
'QualityScore' : 'STRING',
'CpcBid' : 'STRING',
'TopOfPageCpc' : 'STRING',
'Impressions' : 'STRING',
'Clicks' : 'STRING',
'ConvertedClicks' : 'STRING',
'Cost' : 'STRING',
'Conversions' : 'STRING'
}
}],
RECIPIENT_EMAILS: [
'xxx',
]
};
function main() {
createDataset();
for (var i = 0; i < CONFIG.REPORTS.length; i++) {
var reportConfig = CONFIG.REPORTS[i];
createTable(reportConfig);
}
var folder = getDriveFolder();
// Get an account iterator.
var accountIterator = MccApp.accounts().withIds(ACCOUNTS).withLimit(10).get();
var jobIdMap = {};
while (accountIterator.hasNext()) {
// Get the current account.
var account = accountIterator.next();
// Select the child account.
MccApp.select(account);
// Run reports against child account.
var accountJobIds = processReports(folder, account.getCustomerId());
jobIdMap[account.getCustomerId()] = accountJobIds;
}
waitTillJobsComplete(jobIdMap);
sendEmail(jobIdMap);
}
function createDataset() {
if (datasetExists()) {
if (CONFIG.TRUNCATE_EXISTING_DATASET) {
BigQuery.Datasets.remove(CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID, {'deleteContents' : true});
Logger.log('Truncated dataset.');
} else {
Logger.log('Dataset %s already exists. Will not recreate.',
CONFIG.BIGQUERY_DATASET_ID);
return;
}
}
// Create new dataset.
var dataSet = BigQuery.newDataset();
dataSet.friendlyName = CONFIG.BIGQUERY_DATASET_ID;
dataSet.datasetReference = BigQuery.newDatasetReference();
dataSet.datasetReference.projectId = CONFIG.BIGQUERY_PROJECT_ID;
dataSet.datasetReference.datasetId = CONFIG.BIGQUERY_DATASET_ID;
dataSet = BigQuery.Datasets.insert(dataSet, CONFIG.BIGQUERY_PROJECT_ID);
Logger.log('Created dataset with id %s.', dataSet.id);
}
/**
* Checks if dataset already exists in project.
*
* @return {boolean} Returns true if dataset already exists.
*/
function datasetExists() {
// Get a list of all datasets in project.
var datasets = BigQuery.Datasets.list(CONFIG.BIGQUERY_PROJECT_ID);
var datasetExists = false;
// Iterate through each dataset and check for an id match.
if (datasets.datasets != null) {
for (var i = 0; i < datasets.datasets.length; i++) {
var dataset = datasets.datasets[i];
if (dataset.datasetReference.datasetId == CONFIG.BIGQUERY_DATASET_ID) {
datasetExists = true;
break;
}
}
}
return datasetExists;
}
function createTable(reportConfig) {
if (tableExists(reportConfig.NAME)) {
if (CONFIG.TRUNCATE_EXISTING_TABLES) {
BigQuery.Tables.remove(CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID, reportConfig.NAME);
Logger.log('Truncated table %s.', reportConfig.NAME);
} else {
Logger.log('Table %s already exists. Will not recreate.',
reportConfig.NAME);
return;
}
}
// Create new table.
var table = BigQuery.newTable();
var schema = BigQuery.newTableSchema();
var bigQueryFields = [];
// Add account column to table.
var accountFieldSchema = BigQuery.newTableFieldSchema();
accountFieldSchema.description = 'AccountId';
accountFieldSchema.name = 'AccountId';
accountFieldSchema.type = 'STRING';
bigQueryFields.push(accountFieldSchema);
// Add each field to table schema.
var fieldNames = Object.keys(reportConfig.FIELDS);
for (var i = 0; i < fieldNames.length; i++) {
var fieldName = fieldNames[i];
var bigQueryFieldSchema = BigQuery.newTableFieldSchema();
bigQueryFieldSchema.description = fieldName;
bigQueryFieldSchema.name = fieldName;
bigQueryFieldSchema.type = reportConfig.FIELDS[fieldName];
bigQueryFields.push(bigQueryFieldSchema);
}
schema.fields = bigQueryFields;
table.schema = schema;
table.friendlyName = reportConfig.NAME;
table.tableReference = BigQuery.newTableReference();
table.tableReference.datasetId = CONFIG.BIGQUERY_DATASET_ID;
table.tableReference.projectId = CONFIG.BIGQUERY_PROJECT_ID;
table.tableReference.tableId = reportConfig.NAME;
table = BigQuery.Tables.insert(table, CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID);
Logger.log('Created table with id %s.', table.id);
}
function tableExists(tableId) {
// Get a list of all tables in the dataset.
var tables = BigQuery.Tables.list(CONFIG.BIGQUERY_PROJECT_ID,
CONFIG.BIGQUERY_DATASET_ID);
var tableExists = false;
// Iterate through each table and check for an id match.
if (tables.tables != null) {
for (var i = 0; i < tables.tables.length; i++) {
var table = tables.tables[i];
if (table.tableReference.tableId == tableId) {
tableExists = true;
break;
}
}
}
return tableExists;
}
function processReports(folder, accountId) {
var jobIds = [];
// Iterate over each report type.
for (var i = 0; i < CONFIG.REPORTS.length; i++) {
var reportConfig = CONFIG.REPORTS[i];
Logger.log('Running report %s for account %s', reportConfig.NAME,
accountId);
// Get data as csv
var csvData = retrieveAdwordsReport(reportConfig, accountId);
// If configured, back up data.
if (CONFIG.WRITE_DATA_TO_DRIVE) {
var fileName = reportConfig.NAME + '_' + accountId;
folder.createFile(fileName, csvData, MimeType.CSV);
Logger.log('Exported data to Drive folder ' +
CONFIG.DRIVE_FOLDER + ' for report ' + fileName);
}
// Convert to Blob format.
var blobData = Utilities.newBlob(csvData, 'application/octet-stream');
// Load data
var jobId = loadDataToBigquery(reportConfig, blobData);
jobIds.push(jobId);
}
return jobIds;
}
function retrieveAdwordsReport(reportConfig, accountId) {
var fieldNames = Object.keys(reportConfig.FIELDS);
var report = AdWordsApp.report(
'SELECT ' + fieldNames.join(',') +
' FROM ' + reportConfig.NAME + ' ' + reportConfig.CONDITIONS +
' DURING ' + CONFIG.DEFAULT_DATE_RANGE);
var rows = report.rows();
var csvRows = [];
// Header row
csvRows.push('AccountId,'+fieldNames.join(','));
// Iterate over each row.
while (rows.hasNext()) {
var row = rows.next();
var csvRow = [];
csvRow.push(accountId);
for (var i = 0; i < fieldNames.length; i++) {
var fieldName = fieldNames[i];
var fieldValue = row[fieldName].toString();
var fieldType = reportConfig.FIELDS[fieldName];
/* Strip off % and perform any other formatting here.
if ((fieldType == 'FLOAT' || fieldType == 'INTEGER') &&
fieldValue.charAt(fieldValue.length - 1) == '%') {
fieldValue = fieldValue.substring(0, fieldValue.length - 1);
}*/
// Add double quotes to any string values.
if (fieldType == 'STRING') {
fieldValue = fieldValue.replace(/,/g, ''); // Strip all commas from the returned value
fieldValue = fieldValue.replace(/"/g, ''); // Strip all double quotes from the returned value
fieldValue = fieldValue.replace(/\+/g, ''); // Strip all "+" from the returned value
fieldValue = '"' + fieldValue + '"';
}
csvRow.push(fieldValue);
}
csvRows.push(csvRow.join(','));
}
Logger.log('Downloaded ' + reportConfig.NAME + ' for account ' + accountId +
' with ' + csvRows.length + ' rows.');
return csvRows.join('\n');
}
function getDriveFolder() {
var folders = DriveApp.getFoldersByName(CONFIG.DRIVE_FOLDER);
// Assume first folder is the correct one.
if (folders.hasNext()) {
Logger.log('Folder name found. Using existing folder.');
return folders.next();
}
return DriveApp.createFolder(CONFIG.DRIVE_FOLDER);
}
function loadDataToBigquery(reportConfig, data) {
function guid() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return s4() + s4() + s4() + s4() + s4() + s4() + s4() + s4();
}
var makeId = guid();
var job = {
jobReference: {
jobId: makeId
},
configuration: {
load: {
destinationTable: {
projectId: CONFIG.BIGQUERY_PROJECT_ID,
datasetId: CONFIG.BIGQUERY_DATASET_ID,
tableId: reportConfig.NAME
},
skipLeadingRows: 1,
ignoreUnknownValues: true,
allowJaggedRows: true,
allowLargeResults: true
}
}
};
var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Logger.log('Load job started for %s. Check on the status of it here: ' +
'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
CONFIG.BIGQUERY_PROJECT_ID);
return job.jobReference.jobId;
}
function waitTillJobsComplete(jobIdMap) {
var complete = false;
var remainingJobs = [];
var accountIds = Object.keys(jobIdMap);
for (var i = 0; i < accountIds.length; i++){
var accountJobIds = jobIdMap[accountIds[i]];
remainingJobs.push.apply(remainingJobs, accountJobIds);
}
while (!complete) {
if (AdWordsApp.getExecutionInfo().getRemainingTime() < 5){
Logger.log('Script is about to timeout, jobs ' + remainingJobs.join(',') +
' are still incomplete.');
}
remainingJobs = getIncompleteJobs(remainingJobs);
if (remainingJobs.length == 0) {
complete = true;
}
if (!complete) {
Logger.log(remainingJobs.length + ' jobs still being processed.');
// Wait 5 seconds before checking status again.
Utilities.sleep(5000);
}
}
Logger.log('All jobs processed.');
}
function getIncompleteJobs(jobIds) {
var remainingJobIds = [];
for (var i = 0; i < jobIds.length; i++) {
var jobId = jobIds[i];
var getJob = BigQuery.Jobs.get(CONFIG.BIGQUERY_PROJECT_ID, jobId);
if (getJob.status.state != 'DONE') {
remainingJobIds.push(jobId);
}
}
return remainingJobIds;
}
It appears the "Empty Response" error is being thrown on:
var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Have tried quite a few tweaks, but the answer doesn't appear obvious to me. Thanks for any help!
I could be wrong, but I think the problem was with the jobId, because of an issue with the guid() function: a missing "+" sign.
function guid() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return s4() + s4() + s4() + s4() + s4() s4() + s4() + s4();
}
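With the missing "+" restored, the return line would read:
return s4() + s4() + s4() + s4() + s4() + s4() + s4() + s4();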
Why not use the jobId from the response, like below?
var job = {
configuration: {
load: {
destinationTable: {
projectId: CONFIG.BIGQUERY_PROJECT_ID,
datasetId: CONFIG.BIGQUERY_DATASET_ID,
tableId: reportConfig.NAME
},
skipLeadingRows: 1,
ignoreUnknownValues: true,
allowJaggedRows: true,
allowLargeResults: true
}
}
};
var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Logger.log('Load job started for %s. Check on the status of it here: ' +
'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
CONFIG.BIGQUERY_PROJECT_ID);
return insertJob.jobReference.jobId;
Update:
In this case I would suggest logging the jobId (makeId = guid()) and getting the job status by following this link: https://cloud.google.com/bigquery/docs/reference/v2/jobs/get#try-it
Enter the ProjectId and JobId, and you will at least see what is going on with your job!
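For example, a small sketch using the same BigQuery advanced service the script already uses (BigQuery.Jobs.get also appears in getIncompleteJobs above):
var makeId = guid();
Logger.log('Insert job id: %s', makeId);
// ... BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data) ...
var status = BigQuery.Jobs.get(CONFIG.BIGQUERY_PROJECT_ID, makeId).status;
Logger.log('State: %s, errors: %s', status.state,
    JSON.stringify(status.errors || []));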
AdWords places a "--" in for null values. If you define your report fields as anything but STRING (e.g., FLOAT, INTEGER, etc.), the insert will fail because BigQuery can't convert "--" to a float or integer.
Try setting all of your fields to STRING and see if that solves the problem.
Have you tried setting the WRITE_DATA_TO_DRIVE parameter to true to confirm that the report export is successful? How large is the result? I get the same error when attempting an insert greater than 10 MB (~25k rows, depending on columns). If the file exported to Google Drive looks good, you can add a condition to the while loop in retrieveAdwordsReport to limit the file size, as shown below. There was also a post on https://groups.google.com/forum/#!forum/adwords-scripts mentioning an issue when including AdNetworkType columns: https://groups.google.com/forum/#!searchin/adwords-scripts/adnetworktype2%7Csort:relevance/adwords-scripts/yK57JHCt3Cw/Cl1SjFaQBQAJ.
Limit result size:
var processedRows = 0;
// Iterate over each row.
while (rows.hasNext() && ++processedRows < 5000) {
var row = rows.next();
var csvRow = [];
csvRow.push(accountId);
if (processedRows % 1000 == 0)
{
Logger.log('Processed %s rows.',processedRows);
}
...
I am using dgrid/Grid and dstore/RequestMemory for creating the grid and storing the data. Now I want to filter the data according to the values in the fields (see image). I am not sure how to filter data when using a simple dgrid and dstore.
var structure = [{
label : "Value Date",
field : "valueDate"
}, {
id: "currencyCol",
label : "Currency",
field : "currency"
}, {
label : "Nostro",
field : "nostroAgent"
}];
var store= new RequestMemory({
target: 'getReportData',
idProperty: "cashflowId",
headers: structure
});
// Create an instance of OnDemandGrid referencing the store
var grid = new(declare([Grid, Pagination, Selection]))({
collection: store,
columns: structure,
loadingMessage: 'Loading data...',
noDataMessage: 'No results found.',
minRowsPerPage: 50,
}, 'grid');
grid.startup();
on(document.getElementById("filter"), "click", function(event) {
  event.preventDefault();
  grid.set('collection', store.filter({
    currencyCol: "AED"
    // ...
  }));
});
Any help would be appreciated; also, please suggest if I should use a different store or grid.
I got the solution to my question. On the filter button click I have written all my filtering logic, and the final store is set on the dgrid:
on(document.getElementById("filter"), "click", function(event) {
var store= new RequestMemory({
target: 'getReportData',
idProperty: "cashflowId",
headers: structure
});
var from=dijit.byId('from').value;
var to=dijit.byId('to').value;
var curr=dijit.byId('currency').value;
var nos=dijit.byId('nostro').value;
var authStatus=dijit.byId('authStatus').value;
var filterStore;
var finalStore=store;
var filter= new store.Filter();
var dateToFindFrom;
var dateToFindTo;
if (from != "" && from !== null) {
var yyyy = from.getFullYear().toString();
var mm = ((from.getMonth()) + 1).toString(); // getMonth() is zero-based
var dd = from.getDate().toString();
if(mm <= 9){
mm= "0" + mm;
}
if(dd <= 9){
dd= "0" + dd;
}
dateToFindFrom =yyyy + mm + dd;
filterStore= filter.gte('valueDate', dateToFindFrom);
finalStore=finalStore.filter(filterStore);
}
if (to != "" && to !== null) {
var yyyy = to.getFullYear().toString();
var mm = ((to.getMonth()) + 1).toString(); // getMonth() is zero-based
var dd = to.getDate().toString();
if(mm <= 9){
mm= "0" + mm;
}
if(dd <= 9){
dd= "0" + dd;
}
dateToFindTo =yyyy + mm + dd;
filterStore= filter.lte('valueDate', dateToFindTo); //.lte('valueDate', dateToFindTo);
finalStore=finalStore.filter(filterStore);
}
if(curr != "" && curr !== null) {
filterStore= filter.eq('currency', curr);
finalStore=finalStore.filter(filterStore);
}
if(nos != "" && nos !== null) {
filterStore= filter.eq('nostroAgent',nos);
finalStore=finalStore.filter(filterStore);
}
if(authStatus != "" && authStatus !== null) {
if (authStatus=='ALL') {
var both= [true, false];
filterStore= filter.in('approved', both);
finalStore=finalStore.filter(filterStore);
} else if (authStatus=='Authorised Only') {
filterStore= filter.eq('approved', true);
finalStore=finalStore.filter(filterStore);
} else if (authStatus=='Unauthorised Only') {
filterStore= filter.eq('approved', false);
finalStore=finalStore.filter(filterStore);
};
};
grid.set('collection', finalStore);
});
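As an aside, dstore Filter conditions can be chained, with each method returning a new Filter that combines the previous conditions, so the repeated finalStore = finalStore.filter(...) calls could probably be condensed. A rough, untested sketch:
on(document.getElementById("filter"), "click", function(event) {
  // dateToFindFrom / dateToFindTo computed exactly as above
  var combined = new store.Filter()
      .gte('valueDate', dateToFindFrom)
      .lte('valueDate', dateToFindTo)
      .eq('currency', dijit.byId('currency').value);
  grid.set('collection', store.filter(combined));
});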
Where could I find some JavaScript code to parse CSV data?
You can use the CSVToArray() function mentioned in this blog entry.
<script type="text/javascript">
// ref: http://stackoverflow.com/a/1293163/2343
// This will parse a delimited string into an array of
// arrays. The default delimiter is the comma, but this
// can be overridden in the second argument.
function CSVToArray( strData, strDelimiter ){
// Check to see if the delimiter is defined. If not,
// then default to comma.
strDelimiter = (strDelimiter || ",");
// Create a regular expression to parse the CSV values.
var objPattern = new RegExp(
(
// Delimiters.
"(\\" + strDelimiter + "|\\r?\\n|\\r|^)" +
// Quoted fields.
"(?:\"([^\"]*(?:\"\"[^\"]*)*)\"|" +
// Standard fields.
"([^\"\\" + strDelimiter + "\\r\\n]*))"
),
"gi"
);
// Create an array to hold our data. Give the array
// a default empty first row.
var arrData = [[]];
// Create an array to hold our individual pattern
// matching groups.
var arrMatches = null;
// Keep looping over the regular expression matches
// until we can no longer find a match.
while (arrMatches = objPattern.exec( strData )){
// Get the delimiter that was found.
var strMatchedDelimiter = arrMatches[ 1 ];
// Check to see if the given delimiter has a length
// (is not the start of string) and if it matches
// field delimiter. If it does not, then we know
// that this delimiter is a row delimiter.
if (
strMatchedDelimiter.length &&
strMatchedDelimiter !== strDelimiter
){
// Since we have reached a new row of data,
// add an empty row to our data array.
arrData.push( [] );
}
var strMatchedValue;
// Now that we have our delimiter out of the way,
// let's check to see which kind of value we
// captured (quoted or unquoted).
if (arrMatches[ 2 ]){
// We found a quoted value. When we capture
// this value, unescape any double quotes.
strMatchedValue = arrMatches[ 2 ].replace(
new RegExp( "\"\"", "g" ),
"\""
);
} else {
// We found a non-quoted value.
strMatchedValue = arrMatches[ 3 ];
}
// Now that we have our value string, let's add
// it to the data array.
arrData[ arrData.length - 1 ].push( strMatchedValue );
}
// Return the parsed data.
return( arrData );
}
</script>
jQuery-CSV
It's a jQuery plugin designed to work as an end-to-end solution for parsing CSV into JavaScript data. It handles every single edge case presented in RFC 4180, as well as some that pop up for Excel/Google spreadsheet exports (i.e., mostly involving null values) that the specification is missing.
Example:
track,artist,album,year
Dangerous,'Busta Rhymes','When Disaster Strikes',1997
// Calling this
music = $.csv.toArrays(csv)
// Outputs...
[
["track", "artist", "album", "year"],
["Dangerous", "Busta Rhymes", "When Disaster Strikes", "1997"]
]
console.log(music[1][2]) // Outputs: 'When Disaster Strikes'
Update:
Oh yeah, I should also probably mention that it's completely configurable.
music = $.csv.toArrays(csv, {
delimiter: "'", // Sets a custom value delimiter character
separator: ';', // Sets a custom field separator character
});
Update 2:
It now works with jQuery on Node.js too. So you have the option of doing either client-side or server-side parsing with the same library.
Update 3:
Since the Google Code shutdown, jquery-csv has been migrated to GitHub.
Disclaimer: I am also the author of jQuery-CSV.
Here's an extremely simple CSV parser that handles quoted fields with commas, new lines, and escaped double quotation marks. There's no splitting or regular expression. It scans the input string 1-2 characters at a time and builds an array.
Test it at http://jsfiddle.net/vHKYH/.
function parseCSV(str) {
var arr = [];
var quote = false; // 'true' means we're inside a quoted field
// Iterate over each character, keep track of current row and column (of the returned array)
for (var row = 0, col = 0, c = 0; c < str.length; c++) {
var cc = str[c], nc = str[c+1]; // Current character, next character
arr[row] = arr[row] || []; // Create a new row if necessary
arr[row][col] = arr[row][col] || ''; // Create a new column (start with empty string) if necessary
// If the current character is a quotation mark, and we're inside a
// quoted field, and the next character is also a quotation mark,
// add a quotation mark to the current column and skip the next character
if (cc == '"' && quote && nc == '"') { arr[row][col] += cc; ++c; continue; }
// If it's just one quotation mark, begin/end quoted field
if (cc == '"') { quote = !quote; continue; }
// If it's a comma and we're not in a quoted field, move on to the next column
if (cc == ',' && !quote) { ++col; continue; }
// If it's a newline (CRLF) and we're not in a quoted field, skip the next character
// and move on to the next row and move to column 0 of that new row
if (cc == '\r' && nc == '\n' && !quote) { ++row; col = 0; ++c; continue; }
// If it's a newline (LF or CR) and we're not in a quoted field,
// move on to the next row and move to column 0 of that new row
if (cc == '\n' && !quote) { ++row; col = 0; continue; }
if (cc == '\r' && !quote) { ++row; col = 0; continue; }
// Otherwise, append the current character to the current column
arr[row][col] += cc;
}
return arr;
}
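For example:
parseCSV('a,"b ""quoted"", with comma"\r\nnext,line');
// -> [['a', 'b "quoted", with comma'], ['next', 'line']]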
I have an implementation as part of a spreadsheet project.
This code is not yet tested thoroughly, but anyone is welcome to use it.
As some of the answers noted though, your implementation can be much simpler if you actually have DSV or TSV file, as they disallow the use of the record and field separators in the values. CSV, on the other hand, can actually have commas and newlines inside a field, which breaks most regular expression and split-based approaches.
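A quick illustration of that failure mode:
// A naive split breaks as soon as a quoted field contains the separator:
'"Smith, John",42'.split(','); // -> ['"Smith', ' John"', '42'] -- wrong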
var CSV = {
parse: function(csv, reviver) {
reviver = reviver || function(r, c, v) { return v; };
var chars = csv.split(''), c = 0, cc = chars.length, start, end, table = [], row;
while (c < cc) {
table.push(row = []);
while (c < cc && '\r' !== chars[c] && '\n' !== chars[c]) {
start = end = c;
if ('"' === chars[c]){
start = end = ++c;
while (c < cc) {
if ('"' === chars[c]) {
if ('"' !== chars[c+1]) {
break;
}
else {
chars[++c] = ''; // unescape ""
}
}
end = ++c;
}
if ('"' === chars[c]) {
++c;
}
while (c < cc && '\r' !== chars[c] && '\n' !== chars[c] && ',' !== chars[c]) {
++c;
}
} else {
while (c < cc && '\r' !== chars[c] && '\n' !== chars[c] && ',' !== chars[c]) {
end = ++c;
}
}
row.push(reviver(table.length-1, row.length, chars.slice(start, end).join('')));
if (',' === chars[c]) {
++c;
}
}
if ('\r' === chars[c]) {
++c;
}
if ('\n' === chars[c]) {
++c;
}
}
return table;
},
stringify: function(table, replacer) {
replacer = replacer || function(r, c, v) { return v; };
var csv = '', c, cc, r, rr = table.length, cell;
for (r = 0; r < rr; ++r) {
if (r) {
csv += '\r\n';
}
for (c = 0, cc = table[r].length; c < cc; ++c) {
if (c) {
csv += ',';
}
cell = replacer(r, c, table[r][c]);
if (/[,\r\n"]/.test(cell)) {
cell = '"' + cell.replace(/"/g, '""') + '"';
}
csv += (cell || 0 === cell) ? cell : '';
}
}
return csv;
}
};
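Example round trip using the object above:
var table = CSV.parse('a,"b""x",c\r\n1,2,3');
// -> [['a', 'b"x', 'c'], ['1', '2', '3']]
CSV.stringify(table);
// -> 'a,"b""x",c\r\n1,2,3'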
csvToArray v1.3
A compact (645 bytes) but compliant function to convert a CSV string into a 2D array, conforming to the RFC 4180 standard.
https://code.google.com/archive/p/csv-to-array/downloads
Common Usage: jQuery
$.ajax({
url: "test.csv",
dataType: 'text',
cache: false
}).done(function(csvAsString){
csvAsArray=csvAsString.csvToArray();
});
Common usage: JavaScript
csvAsArray = csvAsString.csvToArray();
Override field separator
csvAsArray = csvAsString.csvToArray("|");
Override record separator
csvAsArray = csvAsString.csvToArray("", "#");
Override Skip Header
csvAsArray = csvAsString.csvToArray("", "", 1);
Override all
csvAsArray = csvAsString.csvToArray("|", "#", 1);
Here's my PEG(.js) grammar that seems to do ok at RFC 4180 (i.e. it handles the examples at http://en.wikipedia.org/wiki/Comma-separated_values):
start
= [\n\r]* first:line rest:([\n\r]+ data:line { return data; })* [\n\r]* { rest.unshift(first); return rest; }
line
= first:field rest:("," text:field { return text; })*
& { return !!first || rest.length; } // ignore blank lines
{ rest.unshift(first); return rest; }
field
= '"' text:char* '"' { return text.join(''); }
/ text:[^\n\r,]* { return text.join(''); }
char
= '"' '"' { return '"'; }
/ [^"]
Try it out at http://jsfiddle.net/knvzk/10 or http://pegjs.majda.cz/online. Download the generated parser at https://gist.github.com/3362830.
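Assuming the grammar is compiled with the PEG.js API of that era (PEG.buildParser), usage would look something like:
// grammarSource holds the grammar text above
var parser = PEG.buildParser(grammarSource);
console.log(parser.parse('a,b\n"c,d",e')); // -> [['a', 'b'], ['c,d', 'e']]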
Here's another solution. This uses:
a coarse global regular expression for splitting the CSV string (which includes surrounding quotes and trailing commas)
a fine-grained regular expression for cleaning up the surrounding quotes and trailing commas
type correction that differentiates strings, numbers, boolean values, and null values
For the following input string:
"This is\, a value",Hello,4,-123,3.1415,'This is also\, possible',true,
The code outputs:
[
"This is, a value",
"Hello",
4,
-123,
3.1415,
"This is also, possible",
true,
null
]
Here's my implementation of parseCSVLine() in a runnable code snippet:
function parseCSVLine(text) {
return text.match( /\s*(\"[^"]*\"|'[^']*'|[^,]*)\s*(,|$)/g ).map( function (text) {
let m;
if (m = text.match(/^\s*,?$/)) return null; // null value
if (m = text.match(/^\s*\"([^"]*)\"\s*,?$/)) return m[1]; // Double Quoted Text
if (m = text.match(/^\s*'([^']*)'\s*,?$/)) return m[1]; // Single Quoted Text
if (m = text.match(/^\s*(true|false)\s*,?$/)) return m[1] === "true"; // Boolean
if (m = text.match(/^\s*((?:\+|\-)?\d+)\s*,?$/)) return parseInt(m[1]); // Integer Number
if (m = text.match(/^\s*((?:\+|\-)?\d*\.\d*)\s*,?$/)) return parseFloat(m[1]); // Floating Number
if (m = text.match(/^\s*(.*?)\s*,?$/)) return m[1]; // Unquoted Text
return text;
} );
}
let data = `"This is\, a value",Hello,4,-123,3.1415,'This is also\, possible',true,`;
let obj = parseCSVLine(data);
console.log( JSON.stringify( obj, undefined, 2 ) );
Here's my simple vanilla JavaScript code:
let a = 'one,two,"three, but with a comma",four,"five, with ""quotes"" in it.."'
console.log(splitQuotes(a))
function splitQuotes(line) {
if(line.indexOf('"') < 0)
return line.split(',')
let result = [], cell = '', quote = false;
for(let i = 0; i < line.length; i++) {
const char = line[i]
if(char == '"' && line[i+1] == '"') {
cell += char
i++
} else if(char == '"') {
quote = !quote;
} else if(!quote && char == ',') {
result.push(cell)
cell = ''
} else {
cell += char
}
if ( i == line.length-1 && cell) {
result.push(cell)
}
}
return result
}
I'm not sure why I couldn't get Kirtan's example to work for me. It seemed to be failing on empty fields or maybe fields with trailing commas...
This one seems to handle both.
I did not write the parser code, just a wrapper around the parser function to make this work for a file. See attribution.
var Strings = {
/**
* Wrapped CSV line parser
* @param s String delimited CSV string
* @param sep Separator override
* @attribution: http://www.greywyvern.com/?post=258 (comments closed on blog :( )
*/
parseCSV : function(s,sep) {
// http://stackoverflow.com/questions/1155678/javascript-string-newline-character
var universalNewline = /\r\n|\r|\n/g;
var a = s.split(universalNewline);
for(var i in a){
for (var f = a[i].split(sep = sep || ","), x = f.length - 1, tl; x >= 0; x--) {
if (f[x].replace(/"\s+$/, '"').charAt(f[x].length - 1) == '"') {
if ((tl = f[x].replace(/^\s+"/, '"')).length > 1 && tl.charAt(0) == '"') {
f[x] = f[x].replace(/^\s*"|"\s*$/g, '').replace(/""/g, '"');
} else if (x) {
f.splice(x - 1, 2, [f[x - 1], f[x]].join(sep));
} else f = f.shift().split(sep).concat(f);
} else f[x] = f[x].replace(/""/g, '"'); // assign the result back (the original discarded it)
} a[i] = f;
}
return a;
}
}
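For example:
Strings.parseCSV('a,"b,c"\n1,2'); // -> [['a', 'b,c'], ['1', '2']]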
Regular expressions to the rescue! These few lines of code handle properly quoted fields with embedded commas, quotes, and newlines based on the RFC 4180 standard.
function parseCsv(data, fieldSep, newLine) {
fieldSep = fieldSep || ',';
newLine = newLine || '\n';
var nSep = '\x1D';
var qSep = '\x1E';
var cSep = '\x1F';
var nSepRe = new RegExp(nSep, 'g');
var qSepRe = new RegExp(qSep, 'g');
var cSepRe = new RegExp(cSep, 'g');
var fieldRe = new RegExp('(?<=(^|[' + fieldSep + '\\n]))"(|[\\s\\S]+?(?<![^"]"))"(?=($|[' + fieldSep + '\\n]))', 'g');
var grid = [];
data.replace(/\r/g, '').replace(/\n+$/, '').replace(fieldRe, function(match, p1, p2) {
return p2.replace(/\n/g, nSep).replace(/""/g, qSep).replace(/,/g, cSep);
}).split(/\n/).forEach(function(line) {
var row = line.split(fieldSep).map(function(cell) {
return cell.replace(nSepRe, newLine).replace(qSepRe, '"').replace(cSepRe, ',');
});
grid.push(row);
});
return grid;
}
const csv = 'A1,B1,C1\n"A ""2""","B, 2","C\n2"';
const separator = ','; // field separator, default: ','
const newline = ' <br /> '; // newline representation in case a field contains newlines, default: '\n'
var grid = parseCsv(csv, separator, newline);
// expected: [ [ 'A1', 'B1', 'C1' ], [ 'A "2"', 'B, 2', 'C <br /> 2' ] ]
You don't need a parser-generator such as lex/yacc. The regular expression handles RFC 4180 properly thanks to positive lookbehind, negative lookbehind, and positive lookahead.
Clone/download code at https://github.com/peterthoeny/parse-csv-js
Just throwing this out there: I recently ran into the need to parse CSV columns with JavaScript, and I opted for my own simple solution. It works for my needs, and may help someone else.
const csvString = '"Some text, some text",,"",true,false,"more text","more,text, more, text ",true';
const parseCSV = text => {
const lines = text.split('\n');
const output = [];
lines.forEach(line => {
line = line.trim();
if (line.length === 0) return;
const skipIndexes = {};
const columns = line.split(',');
output.push(columns.reduce((result, item, index) => {
if (skipIndexes[index]) return result;
if (item.startsWith('"') && !item.endsWith('"')) {
while (!columns[index + 1].endsWith('"')) {
index++;
item += `,${columns[index]}`;
skipIndexes[index] = true;
}
index++;
skipIndexes[index] = true;
item += `,${columns[index]}`;
}
result.push(item);
return result;
}, []));
});
return output;
};
console.log(parseCSV(csvString));
Personally I like to use the Deno standard library, since most modules are officially compatible with the browser.
The problem is that the std library is written in TypeScript, but an official solution might happen in the future: https://github.com/denoland/deno_std/issues/641 https://github.com/denoland/dotland/issues/1728
For now there is an actively maintained on-the-fly transpiler, https://bundle.deno.dev/,
so you can use it simply like this:
<script type="module">
import { parse } from "https://bundle.deno.dev/https://deno.land/std@0.126.0/encoding/csv.ts"
console.log(await parse("a,b,c\n1,2,3"))
</script>
I have constructed this JavaScript script to parse a CSV string into an array object. I find it better to break the whole CSV down into lines and fields and process them accordingly. I think this will make it easy for you to change the code to suit your needs.
//
//
// CSV to object
//
//
const new_line_char = '\n';
const field_separator_char = ',';
function parse_csv(csv_str) {
var result = [];
let line_end_index_moved = false;
let line_start_index = 0;
let line_end_index = 0;
let csr_index = 0;
let cursor_val = csv_str[csr_index];
let found_new_line_char = get_new_line_char(csv_str);
let in_quote = false;
// Handle \r\n
if (found_new_line_char == '\r\n') {
csv_str = csv_str.split(found_new_line_char).join(new_line_char);
}
// Handle the last character is not \n
if (csv_str[csv_str.length - 1] !== new_line_char) {
csv_str += new_line_char;
}
while (csr_index < csv_str.length) {
if (cursor_val === '"') {
in_quote = !in_quote;
} else if (cursor_val === new_line_char) {
if (in_quote === false) {
if (line_end_index_moved && (line_start_index <= line_end_index)) {
result.push(parse_csv_line(csv_str.substring(line_start_index, line_end_index)));
line_start_index = csr_index + 1;
} // Else: just ignore line_end_index has not moved or line has not been sliced for parsing the line
} // Else: just ignore because we are in a quote
}
csr_index++;
cursor_val = csv_str[csr_index];
line_end_index = csr_index;
line_end_index_moved = true;
}
// Handle \r\n
if (found_new_line_char == '\r\n') {
let new_result = [];
let curr_row;
for (var i = 0; i < result.length; i++) {
curr_row = [];
for (var j = 0; j < result[i].length; j++) {
curr_row.push(result[i][j].split(new_line_char).join('\r\n'));
}
new_result.push(curr_row);
}
result = new_result;
}
return result;
}
function parse_csv_line(csv_line_str) {
var result = [];
let field_end_index_moved = false; // declared so the assignment below doesn't create a global
let field_start_index = 0;
let field_end_index = 0;
let csr_index = 0;
let cursor_val = csv_line_str[csr_index];
let in_quote = false;
// Pretend that the last char is the separator_char to complete the loop
csv_line_str += field_separator_char;
while (csr_index < csv_line_str.length) {
if (cursor_val === '"') {
in_quote = !in_quote;
} else if (cursor_val === field_separator_char) {
if (in_quote === false) {
if (field_start_index <= field_end_index) {
result.push(parse_csv_field(csv_line_str.substring(field_start_index, field_end_index)));
field_start_index = csr_index + 1;
} // Else: just ignore field_end_index has not moved or field has not been sliced for parsing the field
} // Else: just ignore because we are in quote
}
csr_index++;
cursor_val = csv_line_str[csr_index];
field_end_index = csr_index;
field_end_index_moved = true;
}
return result;
}
function parse_csv_field(csv_field_str) {
const with_quote = (csv_field_str[0] === '"');
if (with_quote) {
csv_field_str = csv_field_str.substring(1, csv_field_str.length - 1); // remove the start and end quotes
csv_field_str = csv_field_str.split('""').join('"'); // handle double quotes
}
return csv_field_str;
}
// Initial method: check the first newline character only
function get_new_line_char(csv_str) {
if (csv_str.indexOf('\r\n') > -1) {
return '\r\n';
} else {
return '\n'
}
}
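For example:
var table = parse_csv('a,b\n"c,1",d\n');
console.log(table); // -> [['a', 'b'], ['c,1', 'd']]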
Just use .split(','):
var str = "one,two,three";
var n = str.split(","); // -> ["one", "two", "three"]
Note that this only handles trivial CSV; it breaks as soon as a field is quoted or contains an embedded comma.