But it works with chromium.
playwright: 1.8.0
node: 14.15
Ubuntu: 20.04
This is my code:
const playwright = require("playwright");

(async () => {
  // Capture a screenshot of the same page in each browser engine.
  for (const browserType of ["chromium", "firefox", "webkit"]) {
    const browser = await playwright[browserType].launch();
    try {
      const context = await browser.newContext();
      const page = await context.newPage();
      await page.goto("http://whatsmyuseragent.org/");
      await page.screenshot({ path: `example-${browserType}.png` });
    } finally {
      // Always release the browser process, even if navigation or the
      // screenshot fails — otherwise each failed iteration leaks a browser.
      await browser.close();
    }
  }
})().catch(console.error); // surface failures instead of an unhandled rejection
On ubuntu 20.04, I was getting errors when running my first playwright script:
node first_script.js after installing via npm i -D playwright (playwright v1.10.0 and v10.19.0) per playwright install instructions
const { webkit } = require('playwright');

(async () => {
  const browser = await webkit.launch();
  try {
    const page = await browser.newPage();
    await page.goto('http://whatsmyuseragent.org/');
    await page.screenshot({ path: 'whatsmyuseragent.png' });
  } finally {
    // Close the browser even when goto/screenshot throws, so the
    // WebKit process does not leak.
    await browser.close();
  }
})().catch(console.error); // surface failures instead of an unhandled rejection
saw errors that included:
"error while loading shared libraries: libharfbuzz-icu.so.0: cannot open shared object file: No such file or directory"
Per several bugs #1935 #2621, running the following allowed me to run successfully:
sudo apt-get install libgles2 gstreamer1.0-libav libharfbuzz-icu0 libwoff1 libgstreamer-plugins-bad1.0-0 libgstreamer-gl1.0-0 libwebp-dev
then tried to run 2nd script with chrome(chromium), firefox and webkit on same ubuntu 20.04 node second_script.js
const playwright = require('playwright');

(async () => {
  // Capture a screenshot of the same page in each browser engine.
  for (const browserType of ['chromium', 'firefox', 'webkit']) {
    const browser = await playwright[browserType].launch();
    try {
      const context = await browser.newContext();
      const page = await context.newPage();
      await page.goto('http://whatsmyuseragent.org/');
      await page.screenshot({ path: `example-${browserType}.png` });
    } finally {
      // Always release the browser process, even if navigation or the
      // screenshot fails — otherwise each failed iteration leaks a browser.
      await browser.close();
    }
  }
})().catch(console.error); // surface failures instead of an unhandled rejection
and got errors:
[pid=532053][err] XPCOMGlueLoad error for file /home/playwright/.cache/ms-playwright/firefox-1238/firefox/libxul.so:
[pid=532053][err] libdbus-glib-1.so.2: cannot open shared object file: No such file or directory
[pid=532053][err] Couldn't load XPCOM.
installed firefox and 2nd script ran successfully:
sudo apt-get install firefox
I solved it I was missing this library libgstreamer-plugins-bad1.0-0:amd64
Related
I am trying to connect to a Databricks SQL endpoint using NodeJS. I followed the instructions on the "Connection Details" tab of my SQL endpoint. As described, I am running Node version 14 or higher, and installed the connector npm package as follows:
npm i @databricks/sql
I used the code provided, included below (I made sure to use the correct host name and access token). I did not change the SQL code from the default (SELECT 1).
// Connect to a Databricks SQL endpoint and run a single statement.
// (The package scope is `@` — `#databricks/sql` is a transcription error.)
const { DBSQLClient } = require('@databricks/sql');

// Connection settings for the SQL endpoint.
const token = 'dapi_MY_ACCESS_TOKEN';
const serverHostname = 'MY_HOSTNAME.cloud.databricks.com';
const httpPath = '/sql/1.0/endpoints/a8e8b6cfcc6a190f';

const client = new DBSQLClient();
// NOTE(review): DBSQLClient.utils exists in 0.1.8-beta.1 but is undefined in
// later betas, which produces "Cannot read properties of undefined
// (reading 'waitUntilReady')" — confirm the installed package version.
const utils = DBSQLClient.utils;

// JavaScript has no keyword arguments: writing `options = {...}` inside a
// call creates implicit globals. Pass plain object/positional arguments.
client
  .connect({
    token,
    host: serverHostname,
    path: httpPath,
  })
  .then(async (client) => {
    const session = await client.openSession();
    const queryOperation = await session.executeStatement(
      'SELECT 1',
      { runAsync: true },
    );
    await utils.waitUntilReady(queryOperation, false, () => {});
    await utils.fetchAll(queryOperation);
    await queryOperation.close();
    const result = utils.getResult(queryOperation).getValue();
    console.table(result);
    await session.close();
    client.close();
  })
  .catch((error) => {
    console.log(error);
  });
When I run the code, I get the following error message:
node read_databricks.cjs
TypeError: Cannot read properties of undefined (reading 'waitUntilReady')
at /Users/vijay.balasubramaniam/test/records-to-cards/read_databricks.cjs:23:19
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
I also tried running the above code within the node REPL, but got the same results. Am I missing a step?
I ran into the same issue.
Delete your package-lock.json. Run npm install and make sure that in the package-lock.json file the version is pointing to beta.1 and not beta.2.
"node_modules/@databricks/sql": {
"version": "0.1.8-beta.1",
}
This is the code
const createTestCafe = require('testcafe');

// Test files to execute.
const angularTests = [
  'tests/Selector.test.ts',
];

let testcafe = null;

createTestCafe('localhost', 1337, 1338)
  .then((tc) => {
    testcafe = tc;
    return testcafe.createBrowserConnection();
  })
  .then(async (connection) => {
    // Open this URL in the remote machine's browser to attach it to the run.
    console.log(connection.url);
    const failedCount = await testcafe
      .createRunner()
      .src(angularTests)
      .browsers(connection)
      // TestCafe expands ${DATE}/${TIME}/${FIXTURE}/... placeholders itself,
      // so the pattern must stay a plain (non-template) string literal.
      .screenshots('C:/dbm', true, '${DATE}_${TIME}/${FIXTURE}/${TEST}_${TEST_INDEX}.png')
      .reporter('xunit', 'C:/dbm/Labsession.xml')
      .run();
    return failedCount;
  })
  .then(async (failedCount) => {
    console.log('Tests failed: ' + failedCount);
    // close() is asynchronous — await it so reports flush before exiting.
    await testcafe.close();
    process.exit(failedCount ? -1 : 0);
  })
  .catch(async (err) => {
    // Without this handler a rejected step leaves the TestCafe server hanging.
    console.error(err);
    if (testcafe) {
      await testcafe.close();
    }
    process.exit(-1);
  });
Result:
This is the URL generated:
http://localhost:1337/browser/connect/yX3Zt2j
Currently, to run in another machine's browser, we need to manually connect to that machine and run on the browser.
But with remote execution, the tests should start automatically on the other desktop.
In this case how to run the generated URL on other desktops without manual interruption.
At present, TestCafe doesn't have such functionality. There is a suggestion for a similar feature in the TestCafe repository on GitHub. Track it to be informed about progress.
I'm using lighthouse npm to test some random URLs... Sometime it fails with leaving
['audits']['interactive']['rawValue']
and
['categories']['performance']['score']
empty.
Is there something in the config to avoid this, or is "retry until it does not fail anymore" my best chance?
That's my setup. It's puppeteer + lighthouse
Start Puppeteer + open URL
// Start Puppeteer and open the URL to audit.
const browser = await puppeteer.launch({ args: ['--no-sandbox'] });
const page = await browser.newPage();
await page.goto(urlToTest, {
  waitUntil: 'networkidle2',
  timeout: 0, // disable the navigation timeout
});

// Start Lighthouse against the same Chrome instance, reusing its
// remote-debugging port taken from the DevTools websocket endpoint.
const opts = {
  chromeFlags: ['--show-paint-rects'],
  output: 'html',
  throttlingMethod: 'provided',
  throttling: {
    throughputKbps: 8000,
    downloadThroughputKbps: 8000,
    uploadThroughputKbps: 2000,
  },
  port: new URL(browser.wsEndpoint()).port,
};

// Third argument is the Lighthouse config; `config = null` in a call
// creates an implicit global — pass null directly for the default config.
const lighthouseResults = await lighthouse(urlToTest, opts, null);
Chromium is getting hanged if header is modified using puppeteer
Puppeteer version:1.12.2
Platform / OS version: MAC / UBUNTU
'use strict';

const puppeteer = require('puppeteer');

(async () => {
  try {
    // Launch a visible browser and grab its initial tab.
    const browser = await puppeteer.launch({ headless: false });
    const [page] = await browser.pages();

    // Intercept every outgoing request so an extra header can be injected.
    await page.setRequestInterception(true);
    page.on('request', (req) => {
      const headers = { ...req.headers(), foo: 'bar' };
      req.continue({ headers });
    });

    await page.goto('http://google.com');
  } catch (err) {
    console.error(err);
  }
})();
In my case, this was not relevant to setting headers.
When I enabled request interception with this line of code observing the same behavior:
await page.setRequestInterception(true);
If I comment out this line of code it is loading the page but the Chromium is complaining that the connection is insecure. In this case waitUntil option does not work within page.goto as option.
If I open a new tab in Chromium (the same window) and copy and paste the same URL, it loads the page without any issues.
How does one set in the configuration to accept insecure self-signed certificates.
I'm using Selenium Standalone and webdriverio.
https://github.com/vvo/selenium-standalone
https://github.com/webdriverio/webdriverio
I cannot read anywhere how to do this.
I'm using the code below:
// Mocha suite setup: start a local Selenium server, drive Chrome through
// webdriverio once, and log the page title before the tests run.
const assert = require('assert');
const { promisify } = require('util');
const exec = promisify(require('child_process').exec);
const selenium = require('selenium-standalone');
const webdriverio = require('webdriverio');
// selenium-standalone exposes callback-style install/start APIs;
// promisify them so they can be awaited below.
selenium.installAsync = promisify(selenium.install);
selenium.startAsync = promisify(selenium.start);
let browser;
let seleniumChild;
before(async function () {
  this.timeout(10 * 1000); // installing + starting Selenium can exceed Mocha's default timeout
  try {
    // Remove any previous hanging sessions
    await exec('pkill -f selenium-standalone');
  } catch (error) {
    // pkill exits non-zero when no process matched, which is expected on a
    // clean machine; any other failing command is unexpected, so abort.
    if (error.cmd !== 'pkill -f selenium-standalone') {
      console.error(error);
      process.exit(1);
    }
  }
  await selenium.installAsync({});
  // Keep a handle on the server child process (not cleaned up here —
  // the pkill above handles leftovers on the next run).
  seleniumChild = await selenium.startAsync({});
  // NOTE(review): to accept self-signed certificates, a capability such as
  // acceptInsecureCerts/acceptSslCerts would go inside desiredCapabilities —
  // confirm against the webdriverio/Selenium versions in use.
  const options = {
    desiredCapabilities: {
      browserName: 'chrome',
    },
    port: 4444, // default selenium-standalone port
  };
  browser = webdriverio.remote(options);
  await browser.init();
  await browser.url('http://google.com');
  const title = await browser.getTitle();
  console.log('Title ->', title);
  await browser.end();
});
describe('test', function () {
  it('test', async function () {
    assert.ok(true);
  });
});
Since it's starting a Selenium server, I'm expecting to be able to specify this through capabilities:
Did you try using:
"acceptSslCerts": "true"
More on this topic you can find on the Selenium github page.