Fit content width-wise in PDF

When rendering to PDF, I need the HTML page to be 100% of the print width; otherwise content gets cut off. Is there an easy way to do this?
I came up with a workaround that gets the HTML width after rendering and then sets the zoom factor to force the correct width:
var page = require('webpage').create(),
    system = require('system'),
    dpi = 89.9,      // strange, but that's what I get on Mac
    pageWidth = 210; // in mm

var getWidth = function() {
    return page.evaluate(function() {
        // get width manually
        // ...
        return width;
    });
};

page.paperSize = {format: 'A4', orientation: 'portrait'};

page.open(system.args[1], function (status) {
    window.setTimeout(function () {
        page.zoomFactor = (pageWidth / 25.4) * dpi / getWidth();
        page.render(system.args[2]);
        phantom.exit();
    }, 200);
});
I'm looking for a straightforward solution, because getting the width requires some tricks due to the framework I use.

Managed to solve this by adding the following CSS rule, which zooms out the page and makes it fit:
html {
    zoom: 0.60;
}

Edit rasterize.js as follows.
Make a backup of rasterize.js.
Remove this block (approx. lines 18-31):
if (system.args.length > 4) {
    page.zoomFactor = system.args[4];
}
page.open(address, function (status) {
    if (status !== 'success') {
        console.log('Unable to load the address!');
        phantom.exit();
    } else {
        window.setTimeout(function () {
            page.render(output);
            phantom.exit();
        }, 200);
    }
});
Replace it with this code block:
if (system.args.length > 4) {
    page.zoomFactor = parseFloat(system.args[4].split("=")[1]);
}
page.open(address, function (status) {
    if (status !== 'success') {
        console.log('Unable to load the address!');
        phantom.exit(1);
    } else {
        window.setTimeout(function () {
            console.log("zoomFactor: " + page.zoomFactor);
            page.evaluate(function(zoom) {
                return document.querySelector('body').style.zoom = zoom;
            }, page.zoomFactor); // <-- your zoom here
            page.render(output);
            phantom.exit();
        }, 200);
    }
});
Then you can export HTML to PDF on the fly like this:
~/bin/phantomjs ~/bin/phantomjs/examples/rasterize.js \
'http://example.com/index.html' \
mysite.pdf paperformat=A4 zoom=0.4

Related

BackstopJS: Not able to scroll the page to end using scrollToSelector

I am using two files: one to keep all the URLs and other variables (the first file below), and another to keep the scenario config (mainConfig.js).
const projectId = "test"; //
let baseUrl = "someurl"; //
let scrollToSelector = "";
let removeSelector = "";

// Replace the values of the below array with the relative URLs of your website. E.g., "/about", "/contact", "/pricing", etc.
// Use just "/" to test the homepage of your website.
// Add as many relative URLs as you need.
const relativeUrls = [
    "/about",
    "/documentation",
    "/case-studies/",
    "/solutions/",
    "/blog/"
];

relativeUrls.map(relativeUrl => {
    if (relativeUrl === "/about") {
        scrollToSelector = "a.wp-block-button__link";
        removeSelector = ".is-style-image-banner";
        console.log(scrollToSelector);
    }
});

// Leave the below array as is if you want to test your website using the viewports listed below.
// The supported viewports are: phone (320px X 480px), tablet (1024px X 768px), and desktop (1280px X 1024px).
// No other viewports are supported.
// You can remove the viewports that you don't need, but at least one of them is required.
const viewports = [
    "phone",
    "tablet",
    "desktop",
];

module.exports = {
    baseUrl,
    projectId,
    relativeUrls,
    viewports,
    scrollToSelector,
    removeSelector
};
mainConfig.js
// Assumes the first config file above is saved next to this one; adjust the path/name if needed.
const basicConfig = require("./basicConfig");

const THREE_SECONDS_IN_MS = 3000;
const scenarios = [];
const viewports = [];

basicConfig.relativeUrls.map(relativeUrl => {
    scenarios.push({
        label: relativeUrl,
        url: `${basicConfig.baseUrl}${relativeUrl}`,
        delay: THREE_SECONDS_IN_MS,
        requireSameDimensions: false,
        scrollToSelector: basicConfig.scrollToSelector,
        removeSelectors: [basicConfig.removeSelector]
        // onReadyScript: "onReadyScript.js",
        // readyEvent: "page_loaded"
    });
});

basicConfig.viewports.map(viewport => {
    if (viewport === "phone") {
        pushViewport(viewport, 320, 480);
    }
    if (viewport === "tablet") {
        pushViewport(viewport, 1024, 768);
    }
    if (viewport === "desktop") {
        pushViewport(viewport, 1280, 1024);
    }
});

function pushViewport(viewport, width, height) {
    viewports.push({
        name: viewport,
        width,
        height,
    });
}

module.exports = {
    id: basicConfig.projectId,
    viewports,
    scenarios,
    paths: {
        bitmaps_reference: "test/backstop_data/bitmaps_reference",
        bitmaps_test: "test/backstop_data/bitmaps_test",
        html_report: "test/backstop_data/html_report"
    },
    report: ["CI"],
    engine: "puppeteer",
    engineOptions: {
        args: ["--no-sandbox"]
    },
    asyncCaptureLimit: 5,
    asyncCompareLimit: 50,
};
scrollToSelector doesn't seem to be working. Is there any other way it should be declared and called?
Your scrollToSelector needs to be in square brackets [], like this: scrollToSelector: ['a.wp-block-button__link']
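For illustration, a minimal sketch of how the scenario entries in mainConfig.js could be built with the selector wrapped in an array, following this answer's suggestion (the array form is taken from the answer, not verified against the BackstopJS docs; the require path is assumed):

// Hypothetical excerpt of mainConfig.js applying the suggestion above.
const basicConfig = require("./basicConfig"); // path/name assumed

const scenarios = basicConfig.relativeUrls.map(relativeUrl => ({
    label: relativeUrl,
    url: `${basicConfig.baseUrl}${relativeUrl}`,
    delay: 3000,
    scrollToSelector: [basicConfig.scrollToSelector], // wrapped in an array, as suggested
    removeSelectors: [basicConfig.removeSelector]
}));

module.exports = { scenarios };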

Is there any reason why no error is found when calling navigator.mediaDevices.getUserMedia

I'm writing a WebRTC Chrome desktop app that accesses 2 cameras simultaneously, using the latest Chrome version on Windows.
Getting the camera list with navigator.mediaDevices.enumerateDevices() is OK, but accessing these devices by their specific id using navigator.mediaDevices.getUserMedia does not work.
It only happens sometimes, and there is no error in the catch.
So I checked whether navigator.mediaDevices.getUserMedia really exists:
if (navigator && navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
}
Yes, it does exist.
I'm just not getting any log output from the navigator.mediaDevices.getUserMedia() call.
getVideoSources_ = function() {
    return new Promise(function(resolve, reject) {
        if (typeof navigator.mediaDevices.enumerateDevices === 'undefined') {
            alert('Your browser does not support navigator.mediaDevices.enumerateDevices, aborting.');
            reject(Error("Your browser does not support navigator.mediaDevices.enumerateDevices, aborting."));
            return;
        }
        requestVideoSetTimeout = 500;
        navigator.mediaDevices.enumerateDevices().then((devices) => {
            // get the list first by sorting mibunsho camera in first place
            for (var i = 0; i < devices.length; i++) {
                log("devices[i]", JSON.stringify(devices[i]));
                log("devices[i].label", devices[i].label);
                if (devices[i].kind === 'videoinput' && devices[i].deviceId && devices[i].label) {
                    if (devices[i].label.indexOf("USB_Camera") > -1) {
                        deviceList[1] = devices[i];
                    } else {
                        deviceList[0] = devices[i];
                    }
                }
            }
            // request video by sorted plan
            for (var i = 0; i < deviceList.length; i++) {
                requestVideo_(deviceList[i].deviceId, deviceList[i].label, resolve, reject);
                requestVideoSetTimeout = 1000; // change requestVideoSetTimeout for next video request
            }
        }).catch((err) => {
            log("getVideoSources_:" + err.name + ": " + err.message);
            reject(Error("getVideoSources_ catch error"));
        });
    });
}
getVideoSources_().then(function(result) {
    ....
}).catch(function(err) {
    ....
});
function requestVideo_(id, label, resolve, reject) {
    if (navigator && navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        log("navigator.mediaDevices.getUserMedia found!");
        navigator.mediaDevices.getUserMedia({
            video: {
                deviceId: {exact: id},
                width: 640,
                height: 480,
                frameRate: {
                    ideal: 20,
                    max: 20
                }
            },
            audio: false
        }).then(
            (stream) => {
                log("***requestVideo_", id);
                log("***requestVideo_", label);
                log("***requestVideo_", stream);
                // USB_Camera is face camera
                if (label.indexOf("USB_Camera") > -1) {
                    log("***requestVideo_001");
                    myStream2 = stream;
                    log("***requestVideo_myStream2", myStream2);
                } else {
                    log("***requestVideo_002");
                    myStream = stream;
                    log("***requestVideo_myStream", myStream);
                    getUserMediaOkCallback_(myStream, label);
                }
                resolve("###Video Stream got###");
                stream.getVideoTracks()[0].addEventListener('ended', function() {
                    log("***Camera ended event fired. " + id + " " + label);
                    endedDevice[id] = label;
                });
            },
            getUserMediaFailedCallback_
        ).catch((error) => {
            log('requestVideo_: ' + error.name);
            reject(Error("requestVideo_ catch error" + error.name));
        });
    }
}
function getUserMediaFailedCallback_(error) {
    log("getUserMediaFailedCallback_ error:", error.name);
    alert('User media request denied with error: ' + error.name);
}
@Kaiido is right: resolve is called in a for-loop here, which is all over the place, and before all the code has finished. Any error after this point is basically lost.
This is the promise constructor anti-pattern. In short: don't write app code inside promise constructors, and don't pass resolve and reject functions down. Instead, let functions return promises up to you, and add all app code in then callbacks on them. Then return all promises so they form a single chain. Only then do errors propagate correctly, as sketched below.
See Using promises on MDN for more.
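A rough sketch only, reusing names from the snippets above (log, getUserMediaOkCallback_, myStream, myStream2): requestVideo_ returns the getUserMedia promise and getVideoSources_ chains on it, so every error bubbles up to a single catch.

// Sketch: requestVideo_ returns a promise instead of receiving resolve/reject.
function requestVideo_(id, label) {
    return navigator.mediaDevices.getUserMedia({
        video: { deviceId: { exact: id }, width: 640, height: 480 },
        audio: false
    }).then(function (stream) {
        if (label.indexOf("USB_Camera") > -1) {
            myStream2 = stream;
        } else {
            myStream = stream;
            getUserMediaOkCallback_(myStream, label);
        }
        return stream; // the value flows down the chain
    });
}

// Sketch: getVideoSources_ builds one chain over all cameras.
function getVideoSources_() {
    return navigator.mediaDevices.enumerateDevices().then(function (devices) {
        var cameras = devices.filter(function (d) {
            return d.kind === 'videoinput' && d.deviceId && d.label;
        });
        // Request the cameras one after another by chaining promises.
        return cameras.reduce(function (chain, device) {
            return chain.then(function () {
                return requestVideo_(device.deviceId, device.label);
            });
        }, Promise.resolve());
    });
}

getVideoSources_()
    .then(function () { log("all cameras requested"); })
    .catch(function (err) { log("getVideoSources_ failed: " + err.name + ": " + err.message); });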

StreamTrack's readyState is getting changed to ended, just before playing the stream (MediaStream - MediaStreamTrack - WebRTC)

The jsfiddle (https://jsfiddle.net/kalyansai99/mm1b74uy/22/) contains code where the user can toggle between the front and back camera of the phone.
On some phones it works fine (Moto G5 Plus, Moto E3, and so on, in Chrome), but on others (Mi Redmi Note 4, Chrome) when I switch to the back camera the stream initially loads with a track whose readyState is "live". But when I am about to play the stream in the video player, the readyState changes to "ended" and a black screen is shown on the video tag.
Not sure what's happening. Any clues?
JSFiddle Code
var player = document.getElementById('player');
var flipBtn = document.getElementById('flipBtn');
var deviceIdMap = {};
var front;
var constraints = {
    audio: false,
    video: {
        frameRate: 1000
    }
};

var gotDevices = function (deviceList) {
    var length = deviceList.length;
    console.log(deviceList);
    for (var i = 0; i < length; i++) {
        var deviceInfo = deviceList[i];
        if (deviceInfo.kind === 'videoinput') {
            if (deviceInfo.label.indexOf('front') !== -1) {
                deviceIdMap.front = deviceInfo.deviceId;
            } else if (deviceInfo.label.indexOf('back') !== -1) {
                deviceIdMap.back = deviceInfo.deviceId;
            }
        }
    }
    if (deviceIdMap.front) {
        constraints.video.deviceId = {exact: deviceIdMap.front};
        front = true;
    } else if (deviceIdMap.back) {
        constraints.video.deviceId = {exact: deviceIdMap.back};
        front = false;
    }
    console.log('deviceIdMap - ', deviceIdMap);
};

var handleError = function (error) {
    console.log('navigator.getUserMedia error: ', error);
};

function handleSuccess(stream) {
    window.stream = stream;
    // this is a video track as there is no audio track
    console.log("Track - ", window.stream.getTracks()[0]);
    console.log('Ready State - ', window.stream.getTracks()[0].readyState);
    if (window.URL) {
        player.src = window.URL.createObjectURL(stream);
    } else {
        player.src = stream;
    }
    player.onloadedmetadata = function (e) {
        console.log('Ready State - 3', window.stream.getTracks()[0].readyState);
        player.play();
        console.log('Ready State - 4', window.stream.getTracks()[0].readyState);
    }
    console.log('Ready State - 2', window.stream.getTracks()[0].readyState);
}

navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);

flipBtn.addEventListener('click', function () {
    if (window.stream) {
        window.stream.getTracks().forEach(function(track) {
            track.stop();
        });
    }
    if (front) {
        constraints.video.deviceId = {exact: deviceIdMap.back};
    } else {
        constraints.video.deviceId = {exact: deviceIdMap.front};
    }
    front = !front;
    navigator.getUserMedia(constraints, handleSuccess, handleError);
}, false);

console.log(constraints);
navigator.getUserMedia(constraints, handleSuccess, handleError);
#player {
    width: 320px;
}
#flipBtn {
    width: 150px;
    height: 50px;
}
<video id="player" autoplay></video>
<div>
    <button id="flipBtn">
        Flip Camera
    </button>
</div>
Replace track.stop() with track.enabled = false, and when adding the track back to the stream, enable it again with track.enabled = true, as sketched below.
The MediaStreamTrack.readyState property changes to "ended" when we stop the track, and a stopped track can never be used again. Therefore it's not wise to use stop(). For more reference:
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/readyState
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/stop
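As a minimal sketch of that suggestion (illustration only, not the fiddle's full flip logic): keep the tracks around and toggle their enabled flag instead of stopping them.

// Illustration of the answer's suggestion: disable tracks instead of stopping them,
// then re-enable them when the stream is used again.
function pauseTracks(stream) {
    stream.getTracks().forEach(function (track) {
        track.enabled = false; // instead of track.stop()
    });
}

function resumeTracks(stream) {
    stream.getTracks().forEach(function (track) {
        track.enabled = true; // readyState stays "live", so the track remains usable
    });
}

// Roughly, in the fiddle's click handler: pauseTracks(window.stream) before switching,
// and resumeTracks(window.stream) when that stream is shown again.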

Parse multiple pages with phantomjs

I have written code that parses all URLs from a page. Next, I would like to open every parsed URL, get the href from its <div class="holder"></div>, and output the results to a file, separated by commas.
So far I have this code. It finds all the URLs that need to be parsed and collects them into a comma-separated file called output2.txt.
var resourceWait = 300,
    maxRenderWait = 10000,
    url = 'URL TO PARSE HREF-s FROM';
var page = require('webpage').create(),
    count = 0,
    forcedRenderTimeout,
    renderTimeout;

page.viewportSize = { width: 1280, height: 1024 };

function doRender() {
    var fs = require('fs');
    var path = 'output2.txt';
    page.includeJs("http://ajax.googleapis.com/ajax/libs/jquery/1.6.1/jquery.min.js", function() {
        fs.write(path, page.evaluate(function() {
            return $('.urlDIV').find('a')
                .map(function() {
                    return this.href;
                })
                .get()
                .join(',');
        }), 'w');
        phantom.exit();
    });
}

page.onResourceRequested = function (req) {
    count += 1;
    clearTimeout(renderTimeout);
};

page.onResourceReceived = function (res) {
    if (!res.stage || res.stage === 'end') {
        count -= 1;
        if (count === 0) {
            renderTimeout = setTimeout(doRender, resourceWait);
        }
    }
};

page.open(url, function (status) {
    if (status !== "success") {
        phantom.exit();
    } else {
        forcedRenderTimeout = setTimeout(function () {
            console.log(count);
            doRender();
        }, maxRenderWait);
    }
});
Thanks in advance,
Martti
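A rough sketch of one way the next step could look (untested; the .holder selector and output file name are assumed): reuse the same page object and open the collected URLs one after another, appending each page's link to a second file.

// Sketch only: visit each collected URL sequentially and append the first
// href found inside <div class="holder"> to output3.txt, comma separated.
var fs = require('fs');

function processUrls(urls, index) {
    if (index >= urls.length) {
        phantom.exit();
        return;
    }
    page.open(urls[index], function (status) {
        if (status === 'success') {
            var href = page.evaluate(function () {
                var link = document.querySelector('.holder a');
                return link ? link.href : '';
            });
            fs.write('output3.txt', href + ',', 'a'); // 'a' appends to the file
        }
        processUrls(urls, index + 1); // then move on to the next URL
    });
}

// For example, starting from the file written above:
// processUrls(fs.read('output2.txt').split(','), 0);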

phantomJS webpage timeout

I have set up a script to create webshots of our app.
It runs perfectly and all is fine until I encounter an image with a broken URL:
"<img src='http://testserver.our.intranet/fetch/image/373e8fd2339696e2feeb680b765d626e' />"
I have managed to break out of the script after 6 seconds using the code below; before that it just looped forever.
But is it possible to ignore the network request (i.e. take the image out of the DOM) and then proceed to create the thumbnail without the image (or with an injected "missing image" placeholder)?
var page = require('webpage').create(),
    system = require('system'),
    address, output, size;

if (system.args.length < 3 || system.args.length > 5) {
    phantom.exit(1);
} else {
    address = system.args[1];
    output = system.args[2];
    page.viewportSize = { width: 640, height: 640 };
    page.zoomFactor = 0.75;
    page.clipRect = { top: 10, left: 0, width: 640, height: 490 };
    try {
        page.open(address, function (status) {
            if (status !== 'success') {
                console.log('Unable to load the address!');
                phantom.exit();
            } else {
                window.setTimeout(function () {
                    page.render(output);
                    phantom.exit();
                }, 200);
            }
        });
    } finally {
        setTimeout(function() {
            console.log("Max execution time " + Math.round(6000) + " seconds exceeded");
            phantom.exit(1);
        }, 6000);
    }
}
PhantomJS 1.9 introduced a new setting, resourceTimeout, that controls how long a request can take before it gets cancelled. Along with that, there's an onResourceTimeout event that is triggered if/when a request times out.
Here's a code snippet illustrating all of the above:
var page = require('webpage').create();
page.settings.resourceTimeout = 5000; // 5 seconds
page.onResourceTimeout = function(e) {
    console.log(e.errorCode);   // it'll probably be 408
    console.log(e.errorString); // it'll probably be 'Network timeout on resource'
    console.log(e.url);         // the url whose request timed out
    phantom.exit(1);
};

page.open('http://...', function (status) {
    ...
});
Unfortunately those options are poorly documented right now. I had to go through GitHub discussions and the PhantomJS source code in order to find them out.