response.success() is not a function - parse-server

I have installed parse-server-example and I have uploaded my cloud code. I have a function there, but I get the error "response.success is not a function".
Parse.Cloud.define("submitWeeklyScore", function (request, response) {
    const gameScore = new Parse.Object("WeeklyGameScore");
    gameScore.set("id", request.params.id);
    gameScore.set("score", request.params.score);
    gameScore.set("name", request.params.name);
    gameScore.set("family", request.params.family);
    gameScore.set("username", request.params.username);
    gameScore.save(null, {useMasterKey: true}).then((saveResult) => {
        var playerScore = saveResult.get("score");
        var playerCreatedAt = saveResult.createdAt;
        var objectId = saveResult.id;
        var mainQuery = new Parse.Query("WeeklyGameScore");
        mainQuery.greaterThanOrEqualTo("score", playerScore);
        mainQuery.lessThan("createdAt", playerCreatedAt);
        mainQuery.count({useMasterKey: true}).then((count) => {
            var obj = {rank: count + 1, objId: objectId};
            response.success(obj);
        }, (error) => {
            response.error(error);
        });
    }, (error) => {
        response.error(error);
    });
});
If I replace response.success(obj); with return obj;, I get this:
info: Ran cloud function submitWeeklyScore for user undefined with:
Input: {"score":1337,"rubikaId":11111,"name":"","family":"","username":""}
Result: undefined {"functionName":"submitWeeklyScore","params": {"score":1337,"rubikaId":11111,"name":"","family":"","username":""}}
The object is saved without any problem, but there is no result.

The Parse Server example installs the latest version of Parse Server. Version 3.0.0 of Parse Server removed response.success(): a cloud function now receives only the request and returns its result (or a promise that resolves to it), and signals an error by throwing. Please see the migration guide for details on updating your cloud code.
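For reference, here is a minimal sketch of the function above migrated to the Parse Server 3.x cloud code style (the handler receives only request, returns its result, and throws to signal an error):
// A sketch of the cloud function above in the 3.x style: no response object.
Parse.Cloud.define("submitWeeklyScore", async (request) => {
    const gameScore = new Parse.Object("WeeklyGameScore");
    gameScore.set("id", request.params.id);
    gameScore.set("score", request.params.score);
    gameScore.set("name", request.params.name);
    gameScore.set("family", request.params.family);
    gameScore.set("username", request.params.username);

    const saveResult = await gameScore.save(null, {useMasterKey: true});

    const mainQuery = new Parse.Query("WeeklyGameScore");
    mainQuery.greaterThanOrEqualTo("score", saveResult.get("score"));
    mainQuery.lessThan("createdAt", saveResult.createdAt);
    const count = await mainQuery.count({useMasterKey: true});

    // Whatever is returned (or the error thrown) becomes the function's result.
    return {rank: count + 1, objId: saveResult.id};
});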

Related

Write rows to BigQuery via nodejs BigQuery Storage Write API

It seems quite new, but I'm just hoping someone here has been able to use Node.js to write directly to BigQuery storage using @google-cloud/bigquery-storage.
There is an explanation of how the overall backend API works and how to write a collection of rows atomically using the BigQuery Write API, but there is no such documentation for Node.js yet. Release 2.7.0 mentions the addition of this feature, but there is no documentation and the code is not easily understood.
There is an open issue requesting an example, but I thought I'd try my luck to see if anyone has been able to use this API yet.
Suppose you have a BigQuery table called student with three columns: id, name and age. The following steps will load data into the table with the Node.js Storage Write API.
Define a student.proto file as follows:
syntax = "proto2";
message Student {
required int64 id = 1;
optional string name = 2;
optional int64 age = 3;
}
Run the following at the command prompt:
protoc --js_out=import_style=commonjs,binary:. student.proto
It should generate a student_pb.js file in the current directory.
Write the following JS code in the current directory and run it:
const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage').v1;
const st = require('./student_pb.js');
const type = require('@google-cloud/bigquery-storage').protos.google.protobuf.FieldDescriptorProto.Type;
const mode = require('@google-cloud/bigquery-storage').protos.google.cloud.bigquery.storage.v1.WriteStream.Type;

const storageClient = new BigQueryWriteClient();

// Placeholder values - change project and dataset accordingly (see the note below the code).
const project = 'my-project-id';
const dataset = 'my_dataset';
const parent = `projects/${project}/datasets/${dataset}/tables/student`;

var writeStream = {type: mode.PENDING};
var student = new st.Student();

// Describe the protobuf schema so BigQuery can decode the serialized rows.
var protoDescriptor = {};
protoDescriptor.name = 'student';
protoDescriptor.field = [
    {'name': 'id', 'number': 1, 'type': type.TYPE_INT64},
    {'name': 'name', 'number': 2, 'type': type.TYPE_STRING},
    {'name': 'age', 'number': 3, 'type': type.TYPE_INT64}
];

async function run() {
    try {
        // Create a pending write stream on the target table.
        var request = {
            parent,
            writeStream
        };
        var response = await storageClient.createWriteStream(request);
        writeStream = response[0].name;

        // Serialize the rows using the generated protobuf class.
        var serializedRows = [];
        // Row 1
        student.setId(1);
        student.setName('st1');
        student.setAge(15);
        serializedRows.push(student.serializeBinary());
        // Row 2
        student.setId(2);
        student.setName('st2');
        student.setAge(15);
        serializedRows.push(student.serializeBinary());

        var protoRows = {
            serializedRows
        };
        var proto_data = {
            writerSchema: {protoDescriptor},
            rows: protoRows
        };

        // Construct request
        request = {
            writeStream,
            protoRows: proto_data
        };

        // Insert rows over the bidirectional appendRows stream.
        const stream = await storageClient.appendRows();
        stream.on('data', response => {
            console.log(response);
        });
        stream.on('error', err => {
            throw err;
        });
        stream.on('end', async () => {
            /* API call completed: finalize the stream and commit it to the table. */
            try {
                var response = await storageClient.finalizeWriteStream({name: writeStream});
                response = await storageClient.batchCommitWriteStreams({parent, writeStreams: [writeStream]});
            }
            catch (err) {
                console.log(err);
            }
        });
        stream.write(request);
        stream.end();
    }
    catch (err) {
        console.log(err);
    }
}
run();
Make sure your environment variables are set correctly to point to the file containing your Google Cloud credentials.
Change the project and dataset values accordingly.
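As a small illustration of that credentials check (GOOGLE_APPLICATION_CREDENTIALS is the standard variable the Google Cloud client libraries read the key file from; failing fast like this is optional):
// Optional sanity check before running the import: the client libraries pick the
// service-account key file up from GOOGLE_APPLICATION_CREDENTIALS automatically.
if (!process.env.GOOGLE_APPLICATION_CREDENTIALS) {
    throw new Error('Set GOOGLE_APPLICATION_CREDENTIALS to the path of your service account key file');
}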

Node js TypeError: Cannot read property 'nick' of undefined

The Issue
The issue is basically that if I try to combine the query with a variable, or write it in "fake PDO" (placeholder) style, it does not work.
Code
if(page == "profile"){
var user = "Failed to load.";
con.query('SELECT * FROM users WHERE id = '+con.escape(parseInt(userIDRequested)), function (err, result, fields) {
//console.log(result);
user= result[0].nick;
io.emit('userinfo', { nick: user});
});
}
app.get("/user/:start", function(req, res){
page = "profile";
var user_id = req.params['start'];
pageid = user_id;
if(didshitgetupdated == false){
useridRequested = pageid;
didshitgetupdated = true;
}
res.writeHeader(200, {"Content-Type": "text/html"});
let btlib = btNetLib(res);
btlib.btSend(navbar);
btlib.btSendFile("profile/index.html");
finishConnection(res);
})
But for some reason I get this:
TypeError: Cannot read property 'nick' of undefined
at Query.<anonymous> (D:\bricktalenode\bricktale.js:36:29)
at Query.<anonymous> (D:\bricktalenode\node_modules\mysql\lib\Connection.js:526:10)
at Query._callback (D:\bricktalenode\node_modules\mysql\lib\Connection.js:488:16)
at Query.Sequence.end (D:\bricktalenode\node_modules\mysql\lib\protocol\sequences\Sequence.js:83:24)
at Query._handleFinalResultPacket (D:\bricktalenode\node_modules\mysql\lib\protocol\sequences\Query.js:149:8)
at Query.EofPacket (D:\bricktalenode\node_modules\mysql\lib\protocol\sequences\Query.js:133:8)
at Protocol._parsePacket (D:\bricktalenode\node_modules\mysql\lib\protocol\Protocol.js:291:23)
at Parser._parsePacket (D:\bricktalenode\node_modules\mysql\lib\protocol\Parser.js:433:10)
at Parser.write (D:\bricktalenode\node_modules\mysql\lib\protocol\Parser.js:43:10)
at Protocol.write (D:\bricktalenode\node_modules\mysql\lib\protocol\Protocol.js:38:16)
Things I tried
making an if condition to check it (did not help)
connecting to the database using a different approach
using the same query in PHP (with the variable changed for PHP); the query is OK, so the issue is in my Node.js code
waiting for some time (did not help)
parseInt
Nothing I did really helped.
But for some reason I do get the data correctly with console.log(result);
I just can't fetch it in my code.
The results
[ RowDataPacket {
nick: '[MODERATED 5]',
id: 5,
password: 'fdaf40dc5531c0acf82911892f552f0a',
banned: 0,
coins: 0,
registerdate: 2021-05-10T02:55:45.000Z,
status: 'BETA STATUS',
rank: 'Player',
rep: 1,
token: 'token',
lastonline: '2021-05-10 03:05:08',
lastdaily: 2021-05-19T07:04:34.000Z } ]
But I cannot access it.
app.get("/user", function(req, res){
page = "profile";
res.writeHeader(200, {"Content-Type": "text/html"});
let btlib = btNetLib(res);
btlib.btSend(navbar);
btlib.btSendFile("profile/index.html");
var sql = 'SELECT * FROM users WHERE id = ?';
con.query(sql, req.query.id, function (err, result) {
if (err) throw err;
setInterval(() => {
var bricklet;
bricklet = result[0].nick;
console.log(bricklet);
io.emit('userinfo', { nick: bricklet});
},150);
});
finishConnection(res);
})
Moving the query inside of my app.get handler solved it.
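For anyone hitting the same error, here is a slightly tightened sketch of that idea (it assumes the con, io, btNetLib, navbar and finishConnection objects from the question, and it guards against an empty result before reading .nick):
app.get("/user", function (req, res) {
    res.writeHeader(200, {"Content-Type": "text/html"});
    let btlib = btNetLib(res);
    btlib.btSend(navbar);
    btlib.btSendFile("profile/index.html");

    // Run the query inside the route handler so it uses this request's id, and
    // only read .nick once the callback has delivered a non-empty result.
    con.query('SELECT * FROM users WHERE id = ?', [req.query.id], function (err, result) {
        if (err) {
            console.error(err);
            return;
        }
        if (!result || result.length === 0) {
            io.emit('userinfo', { nick: 'Failed to load.' });
            return;
        }
        io.emit('userinfo', { nick: result[0].nick });
    });

    finishConnection(res);
});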

Aurelia PhantomJs target is undefined

We have an application that runs on Node, an Express server, and Aurelia. We want to enable SEO with prerendering, so we installed prerender.io and prerender-node.
But while trying to render pages with Prerender, PhantomJS gives the error TypeError: undefined is not an object (evaluating 'target.__useDefault'),
and the code is:
function ensureOriginOnExports(executed, name) {
    var target = executed;
    var key = void 0;
    var exportedValue = void 0;
    if (target.__useDefault) {
        target = target['default'];
    }
    ...
in vendor-bundle.js.
ensureOriginOnExports is used in two places. The first one:
DefaultLoader.prototype.loadModule = function (id) {
    var _this2 = this;
    var existing = this.moduleRegistry[id];
    if (existing !== undefined) {
        return Promise.resolve(existing);
    }
    return new Promise(function (resolve, reject) {
        require([id], function (m) {
            _this2.moduleRegistry[id] = m;
            resolve(ensureOriginOnExports(m, id));
        }, reject);
    });
};
The second one:
DefaultLoader.prototype.loadModule = function (id) {
    var _this3 = this;
    return System.normalize(id).then(function (newId) {
        var existing = _this3.moduleRegistry[newId];
        if (existing !== undefined) {
            return Promise.resolve(existing);
        }
        return System.import(newId).then(function (m) {
            _this3.moduleRegistry[newId] = m;
            return ensureOriginOnExports(m, newId);
        });
    });
};
So the solution to my problem was: first I used the Bluebird polyfill, then I had to add an Intl.js polyfill under /src/intl.js.
I also had to load babel-polyfill.
It looks like PhantomJS needs that file for i18n support.
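For context, here is a rough sketch of how those polyfills can be wired up from the app's entry point; the exact file names and loader configuration differ per project, so treat this as an illustration rather than the exact setup used here:
// src/main.js - load the polyfills before any application code runs, so
// PhantomJS has Promise, the ES2015+ runtime and Intl available.
import 'babel-polyfill';
import * as Bluebird from 'bluebird';
import './intl';              // the intl.js polyfill kept under /src (or the 'intl' npm package)

// Use Bluebird as the Promise implementation for older engines such as PhantomJS.
window.Promise = Bluebird;

export function configure(aurelia) {
    aurelia.use.standardConfiguration();
    aurelia.start().then(() => aurelia.setRoot());
}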

Downloading Excel file via aurelia-http-client

I am working on a task in which I have to download a report in xlsx format. The report file is generated successfully on the server and is received on the client side (aurelia-http-client) as well, but I don't know how to proceed with the download.
I would do something like in this answer: https://stackoverflow.com/a/30270714/6677648
That would end up in something like a response interceptor in Aurelia, like this:
.withResponseType('blob')
.withInterceptor({
    response(message) {
        var defaultFileName = "default.txt";
        var disposition = message.headers.headers['content-disposition'] ? message.headers.headers['content-disposition'] : message.headers.headers['Content-Disposition'];
        if (disposition) {
            var match = disposition.match(/.*filename=\"?([^;\"]+)\"?.*/);
            if (match && match[1])
                defaultFileName = match[1];
        }
        defaultFileName = defaultFileName.replace(/[<>:"\/\\|?*]+/g, '_');
        if (navigator.msSaveBlob)
            return navigator.msSaveBlob(message.response, defaultFileName);
        var blobUrl = window.URL.createObjectURL(message.response);
        var anchor = document.createElement('a');
        anchor.download = defaultFileName;
        anchor.href = blobUrl;
        document.body.appendChild(anchor);
        anchor.click();
        document.body.removeChild(anchor);
    }
})
I used the downloadjs library. Install the library, add it to your aurelia.json and then add
import * as download from 'downloadjs'
Then write your code as follows:
this.httpClient.fetch('your/url/here')
    .then((response: Response) => response.blob())
    .then((blob: Blob) => download(blob, 'filename.extension', 'mime type of the file'));
And voila, your file will be downloaded.
Hello. Using .withInterceptor() generated errors in the response, so I changed it as below to fix the error when there is no response and to download multiple files simultaneously.
getLogsCsv(param) {
    this.http.configure(config => {
        config
            .withResponseType('blob');
    });
    return this.http.get("/admin/api/logs" + param)
        .then(response => {
            if (response.statusCode == 200) {
                var defaultFileName = "FileName.csv";
                var blobUrl = window.URL.createObjectURL(response.response);
                var anchor = document.createElement('a');
                anchor.download = defaultFileName;
                anchor.href = blobUrl;
                document.body.appendChild(anchor);
                anchor.click();
                document.body.removeChild(anchor);
                return response.content;
            } else {
                console.log('response was not ok.');
                console.log(response);
            }
        })
        .catch(error => {
            console.log(error);
        });
}

Import SQL dump within Node environment

I'd like an npm script to create/configure/etc. and finally import a SQL dump. The entire creation, configuration, etc. is working; however, I cannot get the import to work. The data is never inserted. Here's what I have (never mind the nested callbacks, as they'll be turned into promises):
connection.query(`DROP DATABASE IF EXISTS ${config.database};`, err => {
    connection.query(`CREATE DATABASE IF NOT EXISTS ${config.database};`, err => {
        connection.query('use DATABASENAME', err => {
            const sqlDumpPath = path.join(__dirname, 'sql-dump/sql-dump.sql');
            connection.query(`SOURCE ${sqlDumpPath}`, err => {
                connection.end(err => resolve());
            });
        })
    });
});
I also tried the following with Sequelize (ORM):
return new Promise(resolve => {
    const sqlDumpPath = path.join(__dirname, 'sql-dump/sql-dump.sql');
    fs.readFile('./sql/dump.sql', 'utf-8', (err, data) => {
        sequelize
            .query(data)
            .then(resolve)
            .catch(console.error);
    });
});
Here's how I set up my initial Sequelize import using the migrations framework. There is plenty going on here, but in short I:
find the latest sql-dump in the migrations folder
read the file using fs
split the text into queries
check if it's a valid query and, if so, apply some cleaning that my data required (see related post)
push everything into an array of queries, starting by making sure that the database is clean by calling this.down first
run everything as a promise (as suggested here) using mapSeries (not map)
Using sequelize-cli you can create a migration from your shell by writing:
sequelize migration:create
This automatically creates the file where you enter the code below. To execute the migration, you simply write:
sequelize db:migrate
"use strict";
const promise = require("bluebird");
const fs = require("fs");
const path = require("path");
const assert = require("assert");
const db = require("../api/models"); // To be able to run raw queries
const debug = require("debug")("my_new_api");
// I needed this in order to get some encoding issues straight
const Aring = new RegExp(String.fromCharCode(65533) +
"\\" + String.fromCharCode(46) + "{1,3}", "g");
const Auml = new RegExp(String.fromCharCode(65533) +
String.fromCharCode(44) + "{1,3}", "g");
const Ouml = new RegExp(String.fromCharCode(65533) +
String.fromCharCode(45) + "{1,3}", "g");
module.exports = {
up: function (queryInterface, Sequelize) {
// The following section allows me to have multiple sql-files and only use the last dump
var last_sql;
for (let fn of fs.readdirSync(__dirname)){
if (fn.match(/\.sql$/)){
fn = path.join(__dirname, fn);
var stats = fs.statSync(fn);
if (typeof last_sql === "undefined" ||
last_sql.stats.mtime < stats.mtime){
last_sql = {
filename: fn,
stats: stats
};
}
}
}
assert(typeof last_sql !== "undefined", "Could not find any valid sql files in " + __dirname);
// Split file into queries
var queries = fs.readFileSync(last_sql.filename).toString().split(/;\n/);
var actions = [{
query: "Running the down section",
exec: this.down
}]; // Clean database by calling the down first
for (let i in queries){
// Skip empty queries and the character set information in the 40101 section
// as this would most likely require a multi-query set-up
if (queries[i].trim().length == 0 ||
queries[i].match(new RegExp("/\\*!40101 .+ \\*/"))){
continue;
}
// The manual fixing of encoding
let clean_query = queries[i]
.replace(Aring, "Å")
.replace(Ouml, "Ö")
.replace(Auml, "Ä");
actions.push({
query: clean_query.substring(0, 200), // We save a short section of the query only for debugging purposes
exec: () => db.sequelize.query(clean_query)
});
}
// The Series is important as the order isn't retained with just map
return promise.mapSeries(actions, function(item) {
debug(item.query);
return item.exec();
}, { concurrency: 1 });
},
down: function (queryInterface, Sequelize) {
var tables_2_drop = [
"items",
"users",
"usertypes"
];
var actions = [];
for (let tbl of tables_2_drop){
actions.push({
// The created should be created_at
exec: () => db.sequelize.query("DROP TABLE IF EXISTS `" + tbl +"`")
});
}
return promise.map(actions, function(item) {
return item.exec();
}, { concurrency: 1 });/**/
}
};
Based loosely on Max Gordon's answer, here's my code to run a MySQL dump file from Node.js/Sequelize:
"use strict";
const fs = require("fs");
const path = require("path");
/**
* Start off with a MySQL Dump file, import that, and then migrate to the latest version.
*
* #param dbName {string} the name of the database
* #param mysqlDumpFile {string} The full path to the file to import as a starting point
*/
module.exports.migrateFromFile = function(dbName, mysqlDumpFile) {
let sequelize = createSequelize(dbName);
console.log("Importing from " + mysqlDumpFile + "...");
let queries = fs.readFileSync(mysqlDumpFile, {encoding: "UTF-8"}).split(";\n");
console.log("Importing dump file...");
// Setup the DB to import data in bulk.
let promise = sequelize.query("set FOREIGN_KEY_CHECKS=0"
).then(() => {
return sequelize.query("set UNIQUE_CHECKS=0");
}).then(() => {
return sequelize.query("set SQL_MODE='NO_AUTO_VALUE_ON_ZERO'");
}).then(() => {
return sequelize.query("set SQL_NOTES=0");
});
console.time("Importing mysql dump");
for (let query of queries) {
query = query.trim();
if (query.length !== 0 && !query.match(/\/\*/)) {
promise = promise.then(() => {
console.log("Executing: " + query.substring(0, 100));
return sequelize.query(query, {raw: true});
})
}
}
return promise.then(() => {
console.timeEnd("Importing mysql dump");
console.log("Migrating the rest of the way...");
console.time("Migrating after importing mysql dump");
return exports.migrateUp(dbName); // Run the rest of your migrations
}).then(() => {
console.timeEnd("Migrating after importing mysql dump");
});
};
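Note that the code above assumes a createSequelize(dbName) helper and an exports.migrateUp(dbName) migration runner that are not shown. A minimal sketch of what createSequelize could look like (host, user and password values are placeholders):
// Sketch only: one plausible implementation of the createSequelize helper used above.
const Sequelize = require("sequelize");

function createSequelize(dbName) {
    return new Sequelize(dbName, process.env.DB_USER || "root", process.env.DB_PASSWORD || "", {
        host: process.env.DB_HOST || "localhost",
        dialect: "mysql",
        logging: false
    });
}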