Getting all bids from each header bidding partner - prebid.js

We are implementing some header bidding partners in our wrapper using Prebid. Is it possible to get all bids from each SSP?
Any help appreciated.

If you're asking about demand, this is dependent on each SSP. For example, there may be a segment pixel or placement in one SSP that will always give you a $10 bid, but that wouldn't apply to the other SSPs.
If you're asking about getting data on all the bids, you may want to check out pbjs.getBidResponses(), which returns an object with the ad units and bids.
Here's a sample response from pbjs.getBidResponses(), which can then be used however you need that data:
{
  "div-id-one": {
    "bids": [
      {
        "bidderCode": "appnexus",
        "width": 970,
        "height": 250,
        "statusMessage": "Bid available",
        "adId": "1293a95bb3e9615",
        "mediaType": "banner",
        "creative_id": 77765220,
        "cpm": 0.7826,
        "adUrl": "https://...",
        "requestId": "57f961f3-a32b-45df-a180-9d5e53fb9070",
        "responseTimestamp": 1513707536256,
        "requestTimestamp": 1513707535321,
        "bidder": "appnexus",
        "adUnitCode": "div-id-one",
        "timeToRespond": 935,
        "pbLg": "0.50",
        "pbMg": "0.70",
        "pbHg": "0.78",
        "pbAg": "0.75",
        "pbDg": "0.78",
        "pbCg": "0.78",
        "size": "970x250",
        "adserverTargeting": {
          "hb_bidder": "appnexus",
          "hb_adid": "1293a95bb3e9615",
          "hb_pb": "0.78",
          "hb_size": "970x250"
        }
      }
    ]
  },
  "div-id-two": {
    "bids": []
  }
}
There's also a great example on prebid.org of how to output this with console.table, which could be helpful as well:
var responses = pbjs.getBidResponses();
var output = [];
for (var adunit in responses) {
  if (responses.hasOwnProperty(adunit)) {
    var bids = responses[adunit].bids;
    for (var i = 0; i < bids.length; i++) {
      var b = bids[i];
      output.push({
        'adunit': adunit, 'adId': b.adId, 'bidder': b.bidder,
        'time': b.timeToRespond, 'cpm': b.cpm, 'msg': b.statusMessage
      });
    }
  }
}
if (output.length) {
  if (console.table) {
    console.table(output);
  } else {
    for (var j = 0; j < output.length; j++) {
      console.log(output[j]);
    }
  }
} else {
  console.warn('NO prebid responses');
}

There is also a Chrome extension called Prebid Helper that does the same as the console snippet, but with fewer clicks.
However, that is useful for initial setup debugging. If you need to gather aggregated data on all demand partners - bids, timeouts, wins, etc. - you will need to run third-party wrapper analytics or use an analytics adapter. It's not free, but it is usually priced depending on your load on the analytics server. For example: https://headbidder.net/pricing
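If you go the analytics adapter route, the adapter has to be compiled into your Prebid.js build and is then switched on from the page with pbjs.enableAnalytics. A minimal sketch, where 'exampleAnalytics' is a placeholder for whichever adapter you actually build in:
pbjs.que.push(function() {
    // 'exampleAnalytics' is a placeholder provider code; replace it with the
    // analytics adapter that was included when your Prebid.js bundle was built.
    pbjs.enableAnalytics([{
        provider: 'exampleAnalytics',
        options: {
            // adapter-specific settings go here, e.g. an account ID or endpoint
            sampling: 1.0
        }
    }]);
});
The adapter then receives the auction events (bids, wins, timeouts) and forwards them to the analytics backend, which is where the aggregated reporting comes from.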

Try out the Chrome extension called Adwizard. It's been built to debug Prebid setups and shows you all networks and bids per ad unit, CPM and size included.
https://chrome.google.com/webstore/detail/adwizard/kndnhcfdajkaickocacghchhpieogbjh/?ref=stackoverflow


Is there any way to keep the session open after the Alexa Timer API goes off?

I would like to know if there are any possible ways or tricks to leave the session open for the user's input after the timer goes off, because the Timer API docs don't cover it.
timer_request_1 = {
    "duration": "PT15S",
    "timerLabel": "Change name",
    "creationBehavior": {
        "displayExperience": {
            "visibility": "VISIBLE"
        }
    },
    "triggeringBehavior": {
        "operation": {
            "type": "ANNOUNCE",
            "textToAnnounce": [
                {
                    "locale": "en-US",
                    "text": "Would you like to proceed with the x task?"
                }
            ]
        },
        "notificationConfig": {
            "playAudible": False
        }
    }
}

REQUIRED_PERMISSIONS = ["alexa::alerts:timers:skill:readwrite"]

class TimerIntentHandler(AbstractRequestHandler):
    def can_handle(self, handler_input):
        return ask_utils.is_intent_name("TimerIntent")(handler_input)

    def handle(self, handler_input):
        permissions = handler_input.request_envelope.context.system.user.permissions
        if not (permissions and permissions.consent_token):
            return (
                handler_input.response_builder
                .speak("Please give permissions to set timers using the alexa app.")
                .set_card(
                    AskForPermissionsConsentCard(permissions=REQUIRED_PERMISSIONS)
                )
                .response
            )
        timer_service = handler_input.service_client_factory.get_timer_management_service()
        timer_response = timer_service.create_timer(timer_request)
        if str(timer_response.status) == "Status.ON":
            session_attr = handler_input.attributes_manager.session_attributes
            if not session_attr:
                session_attr['lastTimerId'] = timer_response.id
            speech_text = 'Your 40 minutes timer has started!.'
            return (
                handler_input.response_builder
                .speak(speech_text)
                .response
                .ask("Would you like to proceed x task?")
            )
        else:
            speech_text = 'Timer did not start'
            return (
                handler_input.response_builder
                .speak(speech_text)
                .response
            )
I tried adding a return with .ask(), but I got a 'Response' object has no attribute 'ask' error.
Looking forward to hearing your thoughts :)
You use .response to get the final response from response_builder, so you should place all speak(), ask(), etc. builder methods before .response.
One way to keep the session alive is to not send "shouldEndSession", which keeps the session from closing. However, this will not be approved if you go for certification.

Unable to find duplicate elements in an API response

We were testing an API and recently ran into an issue: some of the customers cannot log in to the website.
We found the cause, and it is duplicate keys in the API response; the API still returns a response even when it contains a duplicate key.
So our tests do not catch the duplicate-key condition. Can anyone please help me or guide me on how to find whether there is a duplicate element in the API response?
Tool: Postman
Below is the sample API output.
In the JSON output below you can see there is a duplicate of "operatingSystem"; duplicate keys like this appear for different elements as well.
Since there is no way to debug the API itself for a while, for various reasons, we need to find these duplicate cases on our side.
Any ideas or suggestions would be much appreciated. Thanks in advance.
JSON
e.g.:
{
    "code": 2,
    "deviceId": "ID",
    "deviceName": "Test",
    "platform": "x64",
    "operatingSystem": "test",
    "operatingSystem": "test",
    "gde": 000,
    "productVersion": "0.0",
    "build": "00000",
    "receipt": null
}
How can we handle such a situation? Is there any way to automate/test this case?
Here's something you can try, although it's a bit convoluted. pm.response.json() will normalize the response and remove any duplicates, i.e. you won't be able to detect them. What you can do instead is take the response as text, then manipulate it into a list and look for duplicates there. I used a map object so that if the map already contains a given key, a flag is set and the test fails.
This is not thoroughly tested, but it should give you an idea, or at least a starting point, to tackle the problem:
// Work on the raw body; pm.response.json() would silently drop duplicate keys.
var jsonBody = pm.response.text();
// Strip the outer braces, then split the flat object into "key": value pairs.
var str = jsonBody.substring(1, jsonBody.length - 1);
var keyArr = str.split(",");
var keyMap = {};
var foundDups = false;
for (var i = 0; i < keyArr.length; i++) {
    var key = keyArr[i].split(":")[0];
    if (!(key in keyMap)) {
        keyMap[key] = key;
        console.log("added key " + key);
    } else {
        console.log("found duplicate: " + key);
        foundDups = true;
        break;
    }
}
pm.test("Look for dups", function() {
    pm.expect(foundDups).to.eql(false);
});
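If the real responses contain nested objects or values with commas, the split-on-comma approach can misfire. A regex scan over the raw text for quoted keys is one possible alternative; the pattern and counting logic below are my own sketch, not part of the original answer, and note that it will also flag the same key name appearing in two different nested objects:
// Sketch: count every occurrence of a quoted key followed by a colon in the raw body.
var rawBody = pm.response.text();
var keyPattern = /"([^"]+)"\s*:/g; // naive: could also match quoted strings inside values
var keyCounts = {};
var duplicates = [];
var match;
while ((match = keyPattern.exec(rawBody)) !== null) {
    var key = match[1];
    keyCounts[key] = (keyCounts[key] || 0) + 1;
    if (keyCounts[key] === 2) {
        duplicates.push(key);
    }
}
pm.test("No duplicate keys in response", function() {
    pm.expect(duplicates, "duplicate keys: " + duplicates.join(", ")).to.be.empty;
});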

Community Connector getData() Request only uses the first two schema fields, not all four

I am building a Community Connector between Google Data Studio and SpyFu.com, in order to funnel SEO information for a specific URL into the GDS dashboard.
However, my getData() request only contains the first two fields from my schema. As you can see, I have four listed in the code, yet only the first two fields in the schema are printed to GDS.
I've been through tutorials, official documentation, and YouTube videos, looked this issue up on Google, and checked out the community resources on GitHub.
//Step Two: Define getConfig()
function getConfig(request) {
  var cc = DataStudioApp.createCommunityConnector();
  var config = cc.getConfig();

  config.newInfo()
    .setId('instructions')
    .setText('Give me SpyFu information on the following domain:');

  config.newTextInput()
    .setId('domain')
    .setName('Enter the domain to search')
    .setHelpText('e.g. ebay.com')
    .setPlaceholder('ebay.com');

  config.newTextInput()
    .setId('SECRET_KEY')
    .setName('Enter your API Secret Key')
    .setHelpText('e.g. A1B2C3D4')
    .setPlaceholder('A1B2C3D4');

  config.setDateRangeRequired(false);
  return config.build();
}
//Step Three: Define getSchema()
function getFields(request) {
  var cc = DataStudioApp.createCommunityConnector();
  var fields = cc.getFields();
  var types = cc.FieldType;
  var aggregations = cc.AggregationType;

  fields.newDimension()
    .setId('Keyword')
    .setName('Keywords')
    .setDescription('The keywords most often attributed to this domain.')
    .setType(types.TEXT);

  fields.newMetric()
    .setId('Rank')
    .setName('Rankings')
    .setDescription('The ranking of the target site keyword on the Google Search Page.')
    .setType(types.NUMBER);

  fields.newMetric()
    .setId('Local_Monthly_Searches')
    .setName('Local Searches per Month')
    .setDescription('Number of times, locally, that people have searched for this term within the last month.')
    .setType(types.NUMBER);

  fields.newMetric()
    .setId('Global_Monthly_Searches')
    .setName('Global Searches per Month')
    .setDescription('Number of times, globally, that people have searched for this term within the last month.')
    .setType(types.NUMBER);

  return fields;
}

function getSchema(request) {
  var fields = getFields(request).build();
  return { schema: fields };
}
//Step Four: Define getData()
function responseToRows(requestedFields, response, domain) {
  // Transform parsed data and filter for requested fields
  return response.map(function(Array) {
    var row = [];
    requestedFields.asArray().forEach(function (field) {
      switch (field.getId()) {
        case 'Keyword':
          return row.push(Array.term);
        case 'Rank':
          return row.push(Array.position);
        case 'Local_Monthly_Searches':
          return row.push(Array.exact_local_monthly_search_volume);
        case 'Global_Monthly_Searches':
          return row.push(Array.exact_global_monthly_search_volume);
        case 'domain':
          return row.push(domain);
        default:
          return row.push('');
      }
    });
    return { values: row };
  });
}
function getData(request) {
  console.log("Request from Data Studio");
  console.log(request);

  var requestedFieldIds = request.fields.map(function(field) {
    return field.name;
  });
  var requestedFields = getFields().forIds(requestedFieldIds);

  // Fetch data from API
  var url = [
    'https://www.spyfu.com/apis/url_api/organic_kws?q='
      + request.configParams.domain
      + '&r=20'
      + '&p=[1 TO 10]'
      + '&api_key='
      + request.configParams.SECRET_KEY,
  ];
  try {
    var response = UrlFetchApp.fetch(url.join(''));
  } catch (e) {
    DataStudioApp.createCommunityConnector()
      .newUserError()
      .setDebugText('Failed URL Fetch Attempt. Exception details: ' + e)
      .setText('There was an error accessing this domain. Try again later, or file an issue if this error persists.')
      .throwException();
  }

  console.log("Response from API");
  console.log(response);

  //Parse data from the API
  try {
    var parsedResponse = JSON.parse(response);
  } catch (e) {
    DataStudioApp.createCommunityConnector()
      .newUserError()
      .setDebugText('Error parsing the JSON data. Exception details: ' + e)
      .setText('There was an error parsing the JSON data. Try again later, or file an issue if this error persists.')
      .throwException();
  }

  var rows = responseToRows(requestedFields, parsedResponse);
  return {
    schema: requestedFields.build(),
    rows: rows
  };
}
I need GDS to post four columns of data: "Keyword", "Rank", "Local Monthly Searches" and "Global Monthly Searches".
I cannot figure out how to create a "fixed schema" so that the system always prints these four columns of data on every request. The tutorials and various documentation say it's possible, but not how to do it. Please help!
The number of metrics initially called up by the Google Community Connector is handled from the front end, via Google Data Studio.
The back-end system (the connector) initially posts only the default dimension and default metric. Getting the rest of the schema fields to show up is handled while you are building a report in Google Data Studio: simply click on the data set, select "Data" in the right-hand menu, scroll down to either Metrics or Dimensions, and pick the ones you wish to add to the current set.
Note that these are the fields you established earlier in the coding process, when you were setting up your schema.
Here, you're filtering your defined schema for fields that are present on the request object received by getData():
var requestedFieldIds = request.fields.map(function(field) {
  return field.name;
});
var requestedFields = getFields().forIds(requestedFieldIds);
The visualization in Google Data Studio that is the catalyst for the request will determine which fields are requested.
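Separately, if the goal is just to make sure a sensible dimension and metric appear as soon as the connector is added to a chart, the Fields object also lets the connector declare defaults. A small sketch under that assumption, reusing the field IDs from the getFields() above (only one default dimension and one default metric can be declared; the remaining fields are still added through the Data Studio UI as described):
function getFields(request) {
  var cc = DataStudioApp.createCommunityConnector();
  var fields = cc.getFields();
  var types = cc.FieldType;

  fields.newDimension()
    .setId('Keyword')
    .setType(types.TEXT);

  fields.newMetric()
    .setId('Rank')
    .setType(types.NUMBER);

  // ... the other metrics from the question, unchanged ...

  // Declare which fields a brand-new chart should start with.
  fields.setDefaultDimension('Keyword');
  fields.setDefaultMetric('Rank');

  return fields;
}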

THINC-API - SPEC NOT ACTIVE

Could you please help me solve a problem with the THINC API?
I inspected my CNC with SCOUT and got the following information:
ApiSpec=False
ThincApiInstalled=True
ApiInstallType=Basic
ThincApiCheckResult=VersionRecognized
ThincApiVersion=1.12.1.0-SPEC_NOT_ACTIVE
What should I do to get access to the data?
Your machine does not have the proper specification.
Custom API (CAPI)
Lathe and Multi-Tasking Machines: NC-B Spec No. 4, Bit 0
Machining Center: NC Spec No. 32, Bit 2
You can confirm in your program whether a machine has this spec code enabled by using SCOUT:
bool HasCustomAPI = false;

if (Okuma.Scout.Platform.BaseMachineType == Okuma.Scout.Enums.MachineType.L)
{
    if (Okuma.Scout.SpecCode.NCB.MachineSpecCodeFileExists)
    {
        if (Okuma.Scout.SpecCode.NCB.SpecFileIsValid)
        {
            HasCustomAPI = Okuma.Scout.SpecCode.NCB.Bit(
                Okuma.Scout.Enums.NCBSpecGroup.NCB1MG, 4, 0);
        }
    }
}
else if (Okuma.Scout.Platform.BaseMachineType == Okuma.Scout.Enums.MachineType.M)
{
    if (Okuma.Scout.SpecCode.NC.MachineSpecCodeFileExists)
    {
        if (Okuma.Scout.SpecCode.NC.SpecFileIsValid)
        {
            HasCustomAPI = Okuma.Scout.SpecCode.NC.Bit(
                Okuma.Scout.Enums.NCSpecGroup.NC1MG, 32, 2);
        }
    }
}

if (HasCustomAPI)
{
    // ...
}
The Okuma Open API (THINC API) requires this spec code to function.
It comes standard with machines sold in the USA.
For other countries, this is an option you will need to order.
Please contact your Okuma Representative.

Upload of Media via VMware API results in larger transferred size than file size

We're utilizing the vCloud API to interact with virtual machines (create machines, perform actions, switch media, etc.). One requested function is to be able to upload media (specifically ISOs) to a particular catalog. The API guide (pg 67) is fairly straightforward, and our multi-part requests to the URL that is provided when the upload starts go off without a hitch.
Note: We have to declare the file size before starting the upload
The only thing that seems amiss during the upload itself is that the "transferred size" ends up being larger than the "file size" at the end of the process. This is somewhat odd because our Content-Range never exceeds the expected file size (we assume that metadata is being included without us having a say). Once the transferred size exceeds the file size, the status of the file upload changes to "Error", but the call still returns a 200 OK:
{
    "name": "J Small 4",
    "description": "",
    "files": [{
        "name": "file",
        "totalSize": 50696192,
        "status": "Error",
        "link": "https://cloud01.cs2cloud.com/transfer/27b8f93c-8319-419e-9e8c-15622097670b/file",
        "transferredSize": 54293177
    }],
    "id": "urn:vcloud:media:1cec68ef-f22e-4ec7-ae5d-dfbc4f7137d9",
    "catalogId": "urn:vcloud:catalogitem:19dbfdd8-ea70-4355-abc7-96e34dccb869"
}
Not sure where to even start debugging this, since all the API calls come back with 200 OK, the .ISO file seems to be fine, our Content-Range headers never go outside the established file size, and the metadata seems to be out of our control in terms of editing or measuring it.
Hoping some soul has experienced this issue before and can provide some insight into working towards a solution.
It turns out the issue wasn't with VMware at all, but with how we were chunking up the media file. We initially used FileReader() to chunk up the file and send it over to the VMware API.
Theoretically, we were choosing the chunk size and could then generate and set the content range, but in reality, while we were choosing the Content-Range, the Content-Length ended up different from the chunk size. We're still not entirely sure why it happened (maybe extra metadata being added on), but we found a solution.
The fix: we eliminated FileReader() altogether and just put the file slices directly into a blob (as you can see below):
$scope.parseMediaFile = function(url, file, catalogId) {
    $scope.uploadingMediaFile = true;
    var fileSize = file.size;
    var chunkSize = 1024 * 1024 * 5; // bytes
    var offset = 0;
    var self = this; // we need a reference to the current object
    var chunkReaderBlock = null;
    var chunkNum = 0;

    if (fileSize < chunkSize) {
        chunkSize = fileSize;
    }

    chunkReaderBlock = function(_offset, length, _file) {
        var blob = _file.slice(_offset, length + _offset);
        var beginRange = _offset;
        var endRange = _offset + length;
        if (endRange > _file.size) {
            endRange = _file.size;
        }
        var contentRange = beginRange + "-" + endRange;

        vdcServices.uploadMediaFile(url, blob, fileSize, contentRange).then(
            function(resp) {
                vdcServices.getUploadStatus($scope.company, catalogId).then(function(resp) {
                    var uploaded = resp.data.files[0].transferredSize;
                    $scope.mediaPercentLoaded = $scope.trunc((uploaded / fileSize) * 100);
                    if (endRange == _file.size) {
                        $scope.closeModal();
                        return;
                    }
                    chunkReaderBlock(_offset + length, chunkSize, file);
                }, function(err) {
                    $scope.errorMsg = err;
                    chunkReaderBlock(_offset - length, chunkSize, file);
                });
            },
            function(err) {
                $scope.errorMsg = err;
            }
        );
    };

    // Starts the read with the first block
    if (offset < fileSize) {
        chunkReaderBlock(offset, chunkSize, file);
    }
};
Doing so allowed us to actually control the Content-Length, and since we can identify when the number of bytes transferred equals the file size, we can then complete the process.
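For reference, the piece that actually controls what the transfer server sees is the header set inside vdcServices.uploadMediaFile. The original service code isn't shown above, so the following is only a hypothetical sketch of what such a call might look like, building the Content-Range header from the values passed in (the module name, endpoint handling and header layout are assumptions, not the original implementation):
// Hypothetical AngularJS service; the real vdcServices implementation is not part of the original post.
angular.module('app').factory('vdcServices', ['$http', function($http) {
    return {
        uploadMediaFile: function(url, blob, fileSize, contentRange) {
            return $http.put(url, blob, {
                headers: {
                    'Content-Type': 'application/octet-stream',
                    // e.g. "Content-Range: bytes 0-5242880/50696192"
                    'Content-Range': 'bytes ' + contentRange + '/' + fileSize
                },
                // Send the Blob as-is; Angular's default transform would try to serialize it.
                transformRequest: angular.identity
            });
        }
        // getUploadStatus(...) omitted
    };
}]);
Because the request body is the raw Blob slice, the Content-Length the browser sends matches the slice size exactly, which is what keeps the transferred size in line with the declared file size.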