Jasmine test makes no pass/fail report under webdriver.io

Running the following jasmine test under webdriver.io like this: node path/to/test/script.js, the test executes (the web browser is pulled up and the target page visited), and thanks to the last line, the jasmine 'it' functions below do execute (without the last line they don't, although the 'describe' function still runs).
But jasmine doesn't produce any kind of report for the 'it' tests and the 'expect' assertions; there's nothing from jasmine on the console, no pass/fail result, and so forth.
How do I get jasmine to produce a report, especially one that is readable by Jenkins?
The problem test script:
var webdriverjs = require('foo-bar/node_modules/webdriverio');
var jasmine = require('foo-bar/node_modules/jasmine-node');

var options = {
    port: 4445,
    desiredCapabilities: {
        browserName: process.argv[2] || 'phantomjs'
    }
};

describe('my webdriverjs tests', function () {
    var client;
    jasmine.DEFAULT_TIMEOUT_INTERVAL = 9999999;

    beforeEach(function () {
        client = webdriverjs.remote(options);
        client.init();
    });

    it('shows the correct title', function (done) {
        client
            .url('http://localhost:4444').getTitle(function (err, title) {
                expect(title).toBe('foo bar');
            }).call(done);
    });

    afterEach(function (done) {
        client.end(done);
    });
});

jasmine.getEnv().execute();
Note: Cross-posted here: https://groups.google.com/forum/#!topic/webdriverio/-EOrQ003B9I

I ran into some of the same challenges when I was looking into this. The big issue is that this test needs to be executed as a jasmine test, not a webdriver test.
describe('my webdriverio tests with jasmine', function () {
    var client;

    beforeEach(function () {
        client = require('path/to/webdriverio').remote({
            desiredCapabilities: { browserName: 'safari' }
        }).init().url('https://stackoverflow.com/');
    }, 5000);

    afterEach(function (done) {
        client.end(done);
    }, 5000);

    it('runs a very simple test', function (done) {
        client.getTitle(function (err, result) {
            expect(result).toBe('Stack Overflow');
        }).call(done);
    }, 5000);
});
Now to run this test, you would just run a typical jasmine-node command from your terminal.

It comes down to the naming convention you are using. First, you need to remove the last line, jasmine.getEnv().execute();, and then run the jasmine-node command with the --matchall flag:
jasmine-node --matchall path/to/test/script.js
If you named your file script_spec.js, then you could run it without the --matchall flag.
This also assumes you have jasmine-node installed globally. If you want to use the local node_modules dependency, you need to run this command instead:
./node_modules/jasmine-node/bin/jasmine-node --matchall path/to/test/script.js
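As for the Jenkins half of the original question: jasmine-node can emit results as JUnit XML, the format Jenkins' JUnit plugin consumes. A sketch, assuming jasmine-node's --junitreport and --output CLI flags (the reports directory name is just an example):
jasmine-node --matchall --junitreport --output reports path/to/test/script.js
Then point the "Publish JUnit test result report" post-build step at the generated XML files.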

When you are using the jasmine-node module, you should run your spec with
node_modules/jasmine-node/bin/jasmine-node $TEST_DIRECTORY
And your test files should end with *spec.js, *spec.coffee or *spec.litcoffee, as the docs say.
Also, jasmine.getEnv().execute(); and var jasmine = require('foo-bar/node_modules/jasmine-node'); should not be in your script.
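Putting those answers together: the two jasmine-node lines go away from the problem script (the CLI runner both supplies the jasmine environment and executes the suite), while the describe/it/expect body stays as it was. A sketch of the deletions:
var jasmine = require('foo-bar/node_modules/jasmine-node'); // remove: the runner supplies jasmine
jasmine.getEnv().execute();                                 // remove: the runner executes the suite
The jasmine.DEFAULT_TIMEOUT_INTERVAL assignment can stay as long as the runner exposes a global jasmine object in loaded specs, which jasmine-node does.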

Related

How do we use expect in WebdriverIO as a standalone script?

WebdriverIO has a sample of using it in a script:
https://webdriver.io/docs/gettingstarted#run-in-a-script
const { remote } = require('webdriverio');

;(async () => {
    const browser = await remote({
        capabilities: {
            browserName: 'chrome'
        }
    })

    await browser.url('https://webdriver.io')

    const apiLink = await browser.$('=API')
    await apiLink.click()

    await browser.saveScreenshot('./screenshot.png')
    await browser.deleteSession()
})()
However, there is no mention of how we can use expect, such as expect(elementFoo).toHaveText("Hello World"). How can we have expect (and one of mocha, jasmine, or cucumber, like when we set up a WebdriverIO project using
npx wdio ./path/to/new/project
)? Or is it already possible to use expect without using one of those three modules?
Also, if we used npx wdio ./path/to/new/project to create a whole project, expect(elementFoo).toHaveText("Hello World") knows how to wait a few seconds before finally reporting that the element doesn't contain the text; I am not sure whether pure mocha, jasmine, or cucumber has that feature too.
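The auto-waiting matchers a generated project uses come from the expect-webdriverio package, which can also be pulled into a standalone script. A minimal sketch, assuming expect-webdriverio's documented standalone setup, where requiring the package registers a global expect:
const { remote } = require('webdriverio');
require('expect-webdriverio'); // registers a global `expect` with webdriver-aware matchers

;(async () => {
    const browser = await remote({
        capabilities: { browserName: 'chrome' }
    });
    await browser.url('https://webdriver.io');

    const apiLink = await browser.$('=API');
    // toHaveText retries until its timeout before failing,
    // the same wait-then-fail behaviour as in a generated wdio project
    await expect(apiLink).toHaveText('API');

    await browser.deleteSession();
})();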

How to be more verbose on tests?

I wanted to have more verbose output for each test step. Any ideas on how I could best achieve this without adding console.log after each step?
I tried to overload the t object as shown below, but I can't get it to work more than once in the output.
in mylib.js
exports.init = function (t) {
    t.oTypeText = t.typeText;
    t.typeText = function fn(selector, data, opts) {
        console.log('typing text in ' + selector + ': ' + data);
        return t.oTypeText(selector, data, opts);
    };
    return;
};
in test.js
import { Selector } from 'testcafe';
const mylib = require('./mylib');

fixture("Getting Started")
    .page("https://devexpress.github.io/testcafe/example");

test('My first test', async t => {
    mylib.init(t);
    await t.typeText('#developer-name', 'John Smith')
        .selectText('#developer-name').pressKey('delete')
        .typeText('#developer-name', 'new name')
        .selectText('#developer-name').pressKey('delete')
        .typeText('#developer-name', 'another name');
    await t.click('#submit-button');
});
result is:
Using locally installed version of TestCafe.
Running tests in:
- Firefox 68.0.0 / Mac OS X 10.14.0
Getting Started
(node:62978) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
typing text in #developer-name: John Smith
✓ My first test
1 passed (4s)
TestCafe doesn't support this feature out of the box. I've created a suggestion for your use case in the TestCafe repository: https://github.com/DevExpress/testcafe/issues/4001. You can use the action-overriding approach for now, but theoretically it can break some functionality.
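A plausible explanation for the wrapper logging only once (my reading of the output above; TestCafe's docs don't confirm this): chained calls such as .selectText(...).typeText(...) are dispatched on the promise-like object returned by the first action rather than on t itself, so only the first direct t.typeText call goes through the patched function. Splitting the chain into separate awaited calls on t should route every action through the wrapper:
test('My first test', async t => {
    mylib.init(t);
    // call each action on `t` directly so the patched typeText runs every time
    await t.typeText('#developer-name', 'John Smith');
    await t.selectText('#developer-name');
    await t.pressKey('delete');
    await t.typeText('#developer-name', 'new name');
    await t.click('#submit-button');
});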

Protractor asynchronous parallel testing on docker containers

For a few days I have struggled with parallel execution of tests using selenium docker.
The scenario:
Define browsers in multiCapabilities with specs.
Deploy containers with a selenium-hub, 2 firefox and 2 chrome nodes.
Run the tests.
The issue appears when Chrome and Firefox run the same spec in parallel, and it depends on the speed of execution; let's say Firefox is first and Chrome second (spec1 is running on both browsers at the same time).
Due to a dependency, spec1 succeeds on Firefox (as expected), while on Chrome it should fail with an exception (also as expected). Here comes the interesting part:
the Firefox test finishes, but Chrome hangs (at the point where it should throw the exception), and the test only fails after the configured jasmine/test timeout, let's say 3 minutes, with
"unresolved promise"...
Since I have await on the method and have wrapped it in try/catch, the exception should propagate up to the test, where I have also wrapped the test body in try/catch, and if there is an exception, done.fail() should stop the test.
But it never gets there... after a long time of debugging, the only thing I can see is that the exception is thrown but never reaches the test where I should catch it and fail the test.
Configuration of multiCapabilities:
{
    browserName: 'chrome',
    shardTestFiles: true,
    maxInstances: 2,
    specs: [
        '../spec/**/spec1.js'
    ]
},
{
    browserName: 'firefox',
    maxInstances: 2,
    shardTestFiles: true,
    marionette: true,
    specs: [
        '../spec/**/spec1.js'
    ]
},
Protractor-specific config:
SELENIUM_PROMISE_MANAGER: false,
seleniumAddress: 'ip of the selenium hub',
maxSessions: 4,
framework: 'jasmine',
... and other custom, unrelated props such as loggers, reporters, etc.
Test example:
describe('test 1', () => {
    it('can done something', async (done) => {
        try {
            await doSomething();
        } catch (e) {
            done.fail(e);
        }
        done();
    }, 1000 * 60 * 5);
});
If there is an exception from doSomething(), the test should be forced to fail, but instead it hangs during parallel execution.
Am I missing something, and/or can you suggest why it hangs while executing the same test on different browsers?
If you need more information, please let me know.
That kind of callback does not work with async functions. If you want to fail a test, you can do it more simply:
describe('test 1', () => {
    it('can done something', async () => {
        try {
            await doSomething();
        } catch (e) {
            throw new Error(e);
        }
    });
});
P.S. I highly recommend using Selenoid for running e2e tests in containers.

mocha programmatically set vue error handler

I find myself writing this at the start of pretty much all of my unit tests in mocha:
it('should do something', (done) => {
    Vue.config.errorHandler = done;
    // do something asynchronous
});
By default, Vue catches all errors itself and logs them to the console, so mocha can't see them. This code makes sure that thrown errors fail the tests.
Is there a way with mocha to do this without having to start every single async test with this line of code? If I have to write / use a plugin, that's fine.
Try:
Vue.config.errorHandler = function (err, vm, info) {
    throw err
}
in your test entry.
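For example (a sketch; the test/setup.js file name is hypothetical), the handler can live in a file that mocha loads before any spec via its --require flag, so no individual test needs the per-test Vue.config.errorHandler = done line anymore:
// test/setup.js, loaded with: mocha --require ./test/setup.js
const Vue = require('vue');

// Rethrow errors that Vue would otherwise catch and log,
// so they surface as test failures in mocha
Vue.config.errorHandler = function (err, vm, info) {
    throw err;
};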

Running Knex Migrations Between Mocha Tests

I was using Mocha to test my Node.js app with a test database. In order to reset the DB before each test, I had the following code, which worked perfectly:
process.env.NODE_ENV = 'test';
var knex = require('../db/knex');

describe("Add Item", function() {
    beforeEach(function(done) {
        knex.migrate.rollback()
            .then(function() {
                knex.migrate.latest()
                    .then(function() {
                        return knex.seed.run()
                            .then(function() {
                                done();
                            });
                    });
            });
    });
...
I've since switched from mocha to mocha-casperjs for my integration tests, and now the knex migrations won't run. I get this error message with the exact same beforeEach hook:
undefined is not an object (evaluating 'knex.migrate.rollback')
phantomjs://platform/new-item.js:12:17
value#phantomjs://platform/mocha-casperjs.js:114:20
callFnAsync#phantomjs://platform/mocha.js:4314:12
run#phantomjs://platform/mocha.js:4266:18
next#phantomjs://platform/mocha.js:4630:13
phantomjs://platform/mocha.js:4652:9
timeslice#phantomjs://platform/mocha.js:12620:27
I'm pretty sure that the migration functionality is not included in the webpack build. If you go to http://knexjs.org/, open the debug console, and check out the different clients, e.g. mysql.migrate, you'll see that there are no functions declared at all.
Actually, you can check it with node too if you explicitly load the webpack build instead of the node lib.
// load webpack build instead of node build...
let knex = require('knex/build/knex')({client : 'pg'});
console.log(knex.migrate);
// outputs: {}
So... the question is: why are you trying to run your tests in the PhantomJS browser instead of node.js?
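If the casper-driven suite has to stay, one workaround (a sketch based on the question's own setup; the reset-db.js name and the ./db/knex path are illustrative) is to run the rollback/migrate/seed cycle in plain node before launching mocha-casperjs, so the migrations execute where knex's full node build is available:
// reset-db.js: run with plain node first, e.g. node reset-db.js && mocha-casperjs ...
process.env.NODE_ENV = 'test';
var knex = require('./db/knex');

knex.migrate.rollback()
    .then(function () { return knex.migrate.latest(); })
    .then(function () { return knex.seed.run(); })
    .then(function () { return knex.destroy(); }); // close the pool so the process can exit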