Query the output and get the latest file name - azure-log-analytics

Below is the Kusto query:
ADFActivityRun
| where PipelineName contains "MyPipeline"
| where OperationName == "Failed"
| order by TimeGenerated desc
| take 1
The Output column returns the result below:
"{
""name"": ""varFileNames"",
""value"": [
{
""name"": ""Z400_EEE_20191110_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_CCC_20191119_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_DDD_20191121_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_EEE_20191122_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_AAA_20191202_ERR.txt"",
""type"": ""File""
}
]
}"
The file names have yyyymmdd in them. I want to get only the latest text file name; in the above case, Z400_AAA_20191202_ERR.txt.
The intent is to send an alert saying: "The above error file is available, please check this file"

You could use mv-apply to achieve that.
For example:
print d = dynamic({
"name": "varFileNames",
"value": [
{
"name": "Z400_EEE_20191110_ERR.txt",
"type": "File"
},
{
"name": "Z400_CCC_20191119_ERR.txt",
"type": "File"
},
{
"name": "Z400_DDD_20191121_ERR.txt",
"type": "File"
},
{
"name": "Z400_EEE_20191122_ERR.txt",
"type": "File"
},
{
"name": "Z400_AAA_20191202_ERR.txt",
"type": "File"
}
]
})
| mv-apply value = d.value on (
    parse value.name with * "_" * "_" dt:datetime "_" *
    | top 1 by dt desc
    | project name = value.name
)
| project name
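To run the same logic against the actual table, a sketch that extends the original query; this assumes the Output column holds the JSON string shown above, so parse_json() is applied first to turn it into a dynamic value:
ADFActivityRun
| where PipelineName contains "MyPipeline"
| where OperationName == "Failed"
| order by TimeGenerated desc
| take 1
| extend d = parse_json(Output)
| mv-apply value = d.value on (
    // pull the yyyymmdd token out of the file name as a datetime
    parse value.name with * "_" * "_" dt:datetime "_" *
    | top 1 by dt desc
    | project name = value.name
)
| project name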

How to check a particular value on the basis of a condition in Karate

Goal: verify that the check value is correct for the 123S and 123O responses from the API.
First, check the value at x.details[0].user.school.name[0].codeable.text; if it is 123S, then verify that x.details[0].data.check is abc.
Then, if the value at x.details[1].user.school.name[0].codeable.text is 123O, verify that x.details[1].data.check is xyz.
The order of the array elements changes; the first element is not necessarily 123S, as the API sometimes returns 123O as the first element.
Sample JSON.
{
"type": "1",
"array": 2,
"details": [
{
"path": "path",
"user": {
"school": {
"name": [
{
"value": "this is school",
"codeable": {
"details": [
{
"hello": "yty",
"condition": "check1"
}
],
"text": "123S"
}
}
]
},
"sample": "test1",
"id": "22222"
},
"data": {
"check": "abc"
}
},
{
"path": "path",
"user": {
"school": {
"name": [
{
"value": "this is school",
"codeable": {
"details": [
{
"hello": "def",
"condition": "check2"
}
],
"text": "123O"
}
}
]
},
"sample": "test",
"id": "11111"
},
"data": {
"check": "xyz"
}
}
]
}
This is how I did it in Postman, but how do I replicate the same in Karate?
var jsonData = pm.response.json();
pm.test("Body matches string", function () {
    for (var i = 0; i < jsonData.details.length; i++) {
        if (jsonData.details[i].user.school.name[0].codeable.text == '123S') {
            pm.expect(jsonData.details[i].data.check).to.equal('abc');
        }
        if (jsonData.details[i].user.school.name[0].codeable.text == '123O') {
            pm.expect(jsonData.details[i].data.check).to.equal('xyz');
        }
    }
});
Two lines, and this takes care of any number of combinations of lookup values :)
* def lookup = { '123S': 'abc', '123O': 'xyz' }
* match each response.details contains { data: { check: '#(lookup[_$.user.school.name[0].codeable.text])' } }
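Within match each, _$ refers to the current array element, so the lookup key is read from each element's own codeable.text. A self-contained sketch to try it, assuming the sample JSON above is saved as sample.json (a hypothetical file name) next to the feature file:
Feature: conditional check by school code

Scenario: match check value via a lookup table
    * def response = read('sample.json')
    * def lookup = { '123S': 'abc', '123O': 'xyz' }
    * match each response.details contains { data: { check: '#(lookup[_$.user.school.name[0].codeable.text])' } }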

Upload bulk csv data into existing DynamoDB table

I'm trying to migrate data from a CSV file into an existing AWS DynamoDB table, as part of an AWS Amplify web app.
I followed this CloudFormation tutorial, using the template below.
I was only able to create a new DynamoDB table, not use an existing table and add data to it.
QUESTION:
Is there a way to modify the template so that I can provide an existing table name at the "Specify stack details" step in the wizard, under "DynamoDBTableName", so that the CSV data will be added to that table? If not, is there an alternative process?
{
"AWSTemplateFormatVersion": "2010-09-09",
"Metadata": {
},
"Parameters" : {
"BucketName": {
"Description": "Name of the S3 bucket you will deploy the CSV file to",
"Type": "String",
"ConstraintDescription": "must be a valid bucket name."
},
"FileName": {
"Description": "Name of the S3 file (including suffix)",
"Type": "String",
"ConstraintDescription": "Valid S3 file name."
},
"DynamoDBTableName": {
"Description": "Name of the dynamoDB table you will use",
"Type": "String",
"ConstraintDescription": "must be a valid dynamoDB name."
}
},
"Resources": {
"DynamoDBTable":{
"Type": "AWS::DynamoDB::Table",
"Properties":{
"TableName": {"Ref" : "DynamoDBTableName"},
"BillingMode": "PAY_PER_REQUEST",
"AttributeDefinitions":[
{
"AttributeName": "id",
"AttributeType": "S"
}
],
"KeySchema":[
{
"AttributeName": "id",
"KeyType": "HASH"
}
],
"Tags":[
{
"Key": "Name",
"Value": {"Ref" : "DynamoDBTableName"}
}
]
}
},
"LambdaRole" : {
"Type" : "AWS::IAM::Role",
"Properties" : {
"AssumeRolePolicyDocument": {
"Version" : "2012-10-17",
"Statement" : [
{
"Effect" : "Allow",
"Principal" : {
"Service" : ["lambda.amazonaws.com","s3.amazonaws.com"]
},
"Action" : [
"sts:AssumeRole"
]
}
]
},
"Path" : "/",
"ManagedPolicyArns":["arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole","arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB","arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"],
"Policies": [{
"PolicyName": "policyname",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Resource": "*",
"Action": [
"dynamodb:PutItem",
"dynamodb:BatchWriteItem"
]
}]
}
}]
}
},
"CsvToDDBLambdaFunction": {
"Type": "AWS::Lambda::Function",
"Properties": {
"Handler": "index.lambda_handler",
"Role": {
"Fn::GetAtt": [
"LambdaRole",
"Arn"
]
},
"Code": {
"ZipFile": {
"Fn::Join": [
"\n",
[
"import json",
"import boto3",
"import os",
"import csv",
"import codecs",
"import sys",
"",
"s3 = boto3.resource('s3')",
"dynamodb = boto3.resource('dynamodb')",
"",
"bucket = os.environ['bucket']",
"key = os.environ['key']",
"tableName = os.environ['table']",
"",
"def lambda_handler(event, context):",
"",
"",
" #get() does not store in memory",
" try:",
" obj = s3.Object(bucket, key).get()['Body']",
" except:",
" print(\"S3 Object could not be opened. Check environment variable. \")",
" try:",
" table = dynamodb.Table(tableName)",
" except:",
" print(\"Error loading DynamoDB table. Check if table was created correctly and environment variable.\")",
"",
" batch_size = 100",
" batch = []",
"",
" #DictReader is a generator; not stored in memory",
" for row in csv.DictReader(codecs.getreader('utf-8-sig')(obj)):",
" if len(batch) >= batch_size:",
" write_to_dynamo(batch)",
" batch.clear()",
"",
" batch.append(row)",
"",
" if batch:",
" write_to_dynamo(batch)",
"",
" return {",
" 'statusCode': 200,",
" 'body': json.dumps('Uploaded to DynamoDB Table')",
" }",
"",
"",
"def write_to_dynamo(rows):",
" try:",
" table = dynamodb.Table(tableName)",
" except:",
" print(\"Error loading DynamoDB table. Check if table was created correctly and environment variable.\")",
"",
" try:",
" with table.batch_writer() as batch:",
" for i in range(len(rows)):",
" batch.put_item(",
" Item=rows[i]",
" )",
" except:",
" print(\"Error executing batch_writer\")"
]
]
}
},
"Runtime": "python3.7",
"Timeout": 900,
"MemorySize": 3008,
"Environment" : {
"Variables" : {"bucket" : { "Ref" : "BucketName" }, "key" : { "Ref" : "FileName" },"table" : { "Ref" : "DynamoDBTableName" }}
}
}
},
"S3Bucket": {
"DependsOn" : ["CsvToDDBLambdaFunction","BucketPermission"],
"Type": "AWS::S3::Bucket",
"Properties": {
"BucketName": {"Ref" : "BucketName"},
"AccessControl": "BucketOwnerFullControl",
"NotificationConfiguration":{
"LambdaConfigurations":[
{
"Event":"s3:ObjectCreated:*",
"Function":{
"Fn::GetAtt": [
"CsvToDDBLambdaFunction",
"Arn"
]
}
}
]
}
}
},
"BucketPermission":{
"Type": "AWS::Lambda::Permission",
"Properties":{
"Action": "lambda:InvokeFunction",
"FunctionName":{"Ref" : "CsvToDDBLambdaFunction"},
"Principal": "s3.amazonaws.com",
"SourceAccount": {"Ref":"AWS::AccountId"}
}
}
},
"Outputs" : {
}
}
Another answer
Dennis' answer is one solution, but you can also remove the "DynamoDBTable" block from the "Resources" section of the JSON file (JSON does not support comments, so "commenting it out" means deleting it). The Lambda still receives the existing table's name through the DynamoDBTableName parameter in its environment variables.
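If you deploy the trimmed template from the CLI rather than through the console wizard, a hedged sketch (stack, bucket, file, and table names are placeholders; CAPABILITY_IAM is required because the template creates an IAM role):
aws cloudformation create-stack \
  --stack-name csv-to-ddb \
  --template-body file://template.json \
  --capabilities CAPABILITY_IAM \
  --parameters ParameterKey=BucketName,ParameterValue=my-csv-bucket \
               ParameterKey=FileName,ParameterValue=data.csv \
               ParameterKey=DynamoDBTableName,ParameterValue=my-existing-table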
You can migrate CSV files from Amazon S3 to Amazon DynamoDB using the AWS Database Migration Service (DMS). Have a look at this step-by-step walkthrough.

SSMS Tabular Model: create multiple partitions via one single XMLA script

I need to create a considerable number of partitions using XMLA scripts run through SSMS.
Is there a way to combine the creation of multiple partitions into one single script?
In the example below, I need to execute the first script; only when it finishes can I open the other script and execute it as well. Very time consuming.
How can I restructure the code to run it in only one execution?
Script 1:
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE1", "partition": "Partition"
},
"partition": {
"name": "Process_OLD", "dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",", " MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],", "\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] < 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
}
Script 2:
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE2", "partition": "Partition"
},
"partition": {
"name": "Process_NEW", "dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",", " MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],", "\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] >= 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
}
You can put a sequence command around it:
{
"sequence": {
"operations": [
{
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE1",
"partition": "Partition"
},
"partition": {
"name": "Process_OLD",
"dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",",
" MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],",
"\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] < 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
},
{
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE2",
"partition": "Partition"
},
"partition": {
"name": "Process_NEW",
"dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",",
" MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],",
"\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] >= 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
}
]
}
}
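As a side note, the sequence command also accepts a maxParallelism property; per the TMSL documentation it mainly affects refresh operations run inside the sequence, so it is useful if you process the new partitions in the same batch. A minimal sketch (database, table, and partition names as above):
{
  "sequence": {
    "maxParallelism": 2,
    "operations": [
      {
        "refresh": {
          "type": "full",
          "objects": [
            { "database": "MYDB", "table": "MYTABLE1", "partition": "Process_OLD" },
            { "database": "MYDB", "table": "MYTABLE2", "partition": "Process_NEW" }
          ]
        }
      }
    ]
  }
}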

How to write a De-Identifying template in Google BigQuery

I am trying to de-identify certain columns of a CSV file in Google Cloud. The CSV file contains 10 columns, including ID, FirstName, LastName, D-O-B, etc. I am trying to mask the FirstName and LastName fields, replacing them with the * character.
I read the procedure for writing a de-identifying template at this link.
I am trying to mask only the FirstName and LastName fields using record transformations; however, I am getting an array out of bounds error when I run the job.
Is it necessary to mention all the columns in the de-identification template, or only the fields that I need to mask?
The CSV file is something as follows:
ID   FirstName  LastName  D_O_B      Facility  EncounterNum  EncounterDate  EncounterTime            visitNum
101  Sean       John      8/27/1968  LI        333           4/8/2016       2018-09-02 13:00:00 UTC  1
501  bla        bla       7/13/1947  LI        337           3/14/2016      2018-09-03 21:05:00 UTC  67
851  Julius     Caesar    8/15/1988  LI        339           5/17/2016      2018-09-03 21:25:00 UTC  89
The de-identification template I am using is as follows:
{
"deidentifyTemplate": {
"description": "Record transformation on Names trial",
"deidentifyConfig": {
"recordTransformations": {
"fieldTransformations": [
{
"fields": [
{
"name": "FirstName"
},
{
"name": "LastName"
}
],
"primitiveTransformation": {
"characterMaskConfig": {
"maskingCharacter": "*"
}
}
}
]
}
}
}
}
I expect the output to be a table in BigQuery containing masked FirstName and LastName columns. However, I am getting an array out of bounds error.
Not an expert on the DLP API, but I tried the following de-identify configuration and it worked for me, using the Cloud DLP API's content:deidentify endpoint.
{
"item": {
"value": "My name is John Doe and I live nowhere."
},
"inspectConfig": {
"includeQuote": true,
"infoTypes": [
{
"name": "FIRST_NAME"
},
{
"name": "LAST_NAME"
}
]
},
"deidentifyConfig": {
"infoTypeTransformations": {
"transformations": [
{
"infoTypes": [
{
"name": "FIRST_NAME"
},
{
"name": "LAST_NAME"
}
],
"primitiveTransformation": {
"characterMaskConfig": {
"maskingCharacter": "*"
}
}
}
]
}
}
}
Results:
{
"item": {
"value": "My name is **** *** and I live nowhere."
},
"overview": {
"transformedBytes": "7",
"transformationSummaries": [
{
"infoType": {
"name": "FIRST_NAME"
},
"transformation": {
"characterMaskConfig": {
"maskingCharacter": "*"
}
},
"results": [
{
"count": "1",
"code": "SUCCESS"
}
],
"transformedBytes": "4"
},
{
"infoType": {
"name": "LAST_NAME"
},
"transformation": {
"characterMaskConfig": {
"maskingCharacter": "*"
}
},
"results": [
{
"count": "1",
"code": "SUCCESS"
}
],
"transformedBytes": "3"
}
]
}
}
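For CSV data sent as structured content, recordTransformations like those in the question's template also work, but the item must then be a table, and every row needs exactly as many values as there are headers; a row/header mismatch is one plausible cause of an array out of bounds error (an assumption, not verified against the failing job). A minimal content:deidentify sketch with a table item, using the question's column names:
{
  "item": {
    "table": {
      "headers": [
        { "name": "ID" },
        { "name": "FirstName" },
        { "name": "LastName" }
      ],
      "rows": [
        {
          "values": [
            { "stringValue": "101" },
            { "stringValue": "Sean" },
            { "stringValue": "John" }
          ]
        }
      ]
    }
  },
  "deidentifyConfig": {
    "recordTransformations": {
      "fieldTransformations": [
        {
          "fields": [
            { "name": "FirstName" },
            { "name": "LastName" }
          ],
          "primitiveTransformation": {
            "characterMaskConfig": {
              "maskingCharacter": "*"
            }
          }
        }
      ]
    }
  }
}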

vb.net: converting strings, and downloading multiple files one at a time

I'm creating a Minecraft launcher. The launcher uses the JSON file format to save settings, load available versions, libraries, asset files, and jar files, and to update the game.
To parse the JSON files, the launcher uses Newtonsoft.Json. I have a big problem with all of these JSON files (you can check the versions list: https://s3.amazonaws.com/Minecraft.Download/versions/versions.json), for example:
This is one of them (1.8.1.json). My launcher uses it to check whether a library file exists; the library files are listed in this JSON, and the list starts with the line "libraries": [
{
"id": "1.8.1",
"time": "2014-11-24T14:13:31+00:00",
"releaseTime": "2014-11-24T14:13:31+00:00",
"type": "release",
"minecraftArguments": "--username ${auth_player_name} --version ${version_name} --gameDir ${game_directory} --assetsDir ${assets_root} --assetIndex ${assets_index_name} --uuid ${auth_uuid} --accessToken ${auth_access_token} --userProperties ${user_properties} --userType ${user_type}",
"minimumLauncherVersion": 14,
"assets": "1.8",
"libraries": [
{
"name": "com.ibm.icu:icu4j-core-mojang:51.2"
},
{
"name": "net.sf.jopt-simple:jopt-simple:4.6"
},
{
"name": "com.paulscode:codecjorbis:20101023"
},
{
"name": "com.paulscode:codecwav:20101023"
},
{
"name": "com.paulscode:libraryjavasound:20101123"
},
{
"name": "com.paulscode:librarylwjglopenal:20100824"
},
{
"name": "com.paulscode:soundsystem:20120107"
},
{
"name": "io.netty:netty-all:4.0.23.Final"
},
{
"name": "com.google.guava:guava:17.0"
},
{
"name": "org.apache.commons:commons-lang3:3.3.2"
},
{
"name": "commons-io:commons-io:2.4"
},
{
"name": "commons-codec:commons-codec:1.9"
},
{
"name": "net.java.jinput:jinput:2.0.5"
},
{
"name": "net.java.jutils:jutils:1.0.0"
},
{
"name": "com.google.code.gson:gson:2.2.4"
},
{
"name": "com.mojang:authlib:1.5.17"
},
{
"name": "com.mojang:realms:1.7.3"
},
{
"name": "org.apache.commons:commons-compress:1.8.1"
},
{
"name": "org.apache.httpcomponents:httpclient:4.3.3"
},
{
"name": "commons-logging:commons-logging:1.1.3"
},
{
"name": "org.apache.httpcomponents:httpcore:4.3.2"
},
{
"name": "org.apache.logging.log4j:log4j-api:2.0-beta9"
},
{
"name": "org.apache.logging.log4j:log4j-core:2.0-beta9"
},
{
"name": "org.lwjgl.lwjgl:lwjgl:2.9.1",
"rules": [
{
"action": "allow"
},
{
"action": "disallow",
"os": {
"name": "osx"
}
}
]
},
{
"name": "org.lwjgl.lwjgl:lwjgl_util:2.9.1",
"rules": [
{
"action": "allow"
},
{
"action": "disallow",
"os": {
"name": "osx"
}
}
]
},
{
"name": "org.lwjgl.lwjgl:lwjgl-platform:2.9.1",
"natives": {
"linux": "natives-linux",
"windows": "natives-windows",
"osx": "natives-osx"
},
"extract": {
"exclude": [
"META-INF/"
]
},
"rules": [
{
"action": "allow"
},
{
"action": "disallow",
"os": {
"name": "osx"
}
}
]
},
{
"name": "org.lwjgl.lwjgl:lwjgl:2.9.2-nightly-20140822",
"rules": [
{
"action": "allow",
"os": {
"name": "osx"
}
}
]
},
{
"name": "org.lwjgl.lwjgl:lwjgl_util:2.9.2-nightly-20140822",
"rules": [
{
"action": "allow",
"os": {
"name": "osx"
}
}
]
},
{
"name": "org.lwjgl.lwjgl:lwjgl-platform:2.9.2-nightly-20140822",
"natives": {
"linux": "natives-linux",
"windows": "natives-windows",
"osx": "natives-osx"
},
"extract": {
"exclude": [
"META-INF/"
]
},
"rules": [
{
"action": "allow",
"os": {
"name": "osx"
}
}
]
},
{
"name": "net.java.jinput:jinput-platform:2.0.5",
"natives": {
"linux": "natives-linux",
"windows": "natives-windows",
"osx": "natives-osx"
},
"extract": {
"exclude": [
"META-INF/"
]
}
},
{
"name": "tv.twitch:twitch:6.5"
},
{
"name": "tv.twitch:twitch-platform:6.5",
"rules": [
{
"action": "allow"
},
{
"action": "disallow",
"os": {
"name": "linux"
}
}
],
"natives": {
"linux": "natives-linux",
"windows": "natives-windows-${arch}",
"osx": "natives-osx"
},
"extract": {
"exclude": [
"META-INF/"
]
}
},
{
"name": "tv.twitch:twitch-external-platform:4.5",
"rules": [
{
"action": "allow",
"os": {
"name": "windows"
}
}
],
"natives": {
"windows": "natives-windows-${arch}"
},
"extract": {
"exclude": [
"META-INF/"
]
}
}
],
"mainClass": "net.minecraft.client.main.Main"
}
I want to convert each library entry from the JSON file into <package>/<name>/<version>/<name>-<version>.jar. For example, the library entry "name": "tv.twitch:twitch-external-platform:4.5" converts to
tv\twitch\twitch-external-platform\twitch-external-platform-4.5.jar
and the download URL is then "https://libraries.minecraft.net/package/name/version/name-version.jar", where:
<package> = tv
<name> = twitch
<version> = 4.5
<name>-<version>.jar = twitch-external-platform-4.5.jar
I then want to add these converted strings to a list of strings and use a WebClient to download the listed files one at a time, saving each to a file path like Root + "\libraries\package\name\version\name-version.jar" (the download path changes for each file that is downloaded).
(Dim Root As String = Application.StartupPath)
'Convert Download String:
Public Function ConvertStringDownloadable(ByVal name As String, ByVal natives As Dictionary(Of String, String), ByVal native As Integer) As String
    Dim newName As String
    ' Trailing version, e.g. "4.5" or "2.9.2-nightly-20140822".
    Dim version = Regex.Match(name, "(\d+\.?){1,}(-?\w+)*$", RegexOptions.Multiline).Value
    ' Artifact name between the last two colons, e.g. "twitch-external-platform".
    Dim typeName As String = Regex.Match(name.Remove(name.LastIndexOf(":"c)), "(-?\w+)+$", RegexOptions.Multiline).Value
    ' "group:artifact:" becomes the "group/artifact/" path prefix.
    newName = Regex.Replace(name.Replace(version, ""), "[:.]", "/")
    If (IsNothing(natives)) Then
        newName = String.Format("{0}{2}/{1}-{2}.jar", newName, typeName, version)
    Else
        ' Native libraries get a classifier suffix, e.g. "-natives-windows".
        newName = String.Format("{0}{2}/{1}-{2}-{3}.jar", newName, typeName, version, natives.Values(native))
    End If
    If newName.Contains("${arch}") Then
        newName = newName.Replace("${arch}", "64")
    End If
    Return newName
End Function
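A hand-traced example of what the function above produces for the library entry from the question:
' ConvertStringDownloadable("tv.twitch:twitch-external-platform:4.5", Nothing, 0)
' returns "tv/twitch/twitch-external-platform/4.5/twitch-external-platform-4.5.jar"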
'Convert Launch String:
Public Function ConvertStringLaunchable(ByVal name As String, ByVal natives As Dictionary(Of String, String), ByVal native As Integer) As String
    Dim newName As String
    Dim version = Regex.Match(name, "(\d+\.?){1,}(-?\w+)*$", RegexOptions.Multiline).Value
    Dim typeName As String = Regex.Match(name.Remove(name.LastIndexOf(":"c)), "(-?\w+)+$", RegexOptions.Multiline).Value
    newName = Regex.Replace(name.Replace(version, ""), "[:.]", "\")
    If (IsNothing(natives)) Then
        newName = String.Format("{0}{2}\{1}-{2}.jar", newName, typeName, version)
    Else
        newName = String.Format("{0}{2}\{1}-{2}-{3}.jar", newName, typeName, version, natives.Values(native))
    End If
    If newName.Contains("${arch}") Then
        newName = newName.Replace("${arch}", "64")
    End If
    Return newName
End Function
'Load library files:
Public Async Function LoadLibraries() As Task
    Dim item As String = SelectedGameVersion
    Dim client As New WebClient()
    Await client.DownloadFileTaskAsync(New Uri("https://s3.amazonaws.com/Minecraft.Download/versions/" + item + "/" + item + ".json"), Root + "\versions\" + item + "\" + item + ".json")
    Dim JSONREADER As New StreamReader(Root + "\versions\" + item + "\" + item + ".json")
    Dim JSON As String = JSONREADER.ReadToEnd()
    JSONREADER.Close()
    Dim JSONResult = JsonConvert.DeserializeObject(Of Object)(JSON)
    For Each i In JSONResult("libraries").Children()
        temp.ListView1.Items.Add(i.ToObject(Of libraries).name)
    Next
End Function
To hold the values, I created two lists of strings:
Dim librariesDownloadString As New List(Of String)
Dim librariesLaunchString As New List(Of String)
To add the values to the lists:
For Each item1 As ListViewItem In temp.ListView1.Items
    librariesDownloadString.Add(ConvertStringDownloadable(item1.Text, Nothing, 0))
Next
For Each item2 As ListViewItem In temp.ListView1.Items
    librariesLaunchString.Add(ConvertStringLaunchable(item2.Text, Nothing, 0))
Next
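For the download step itself, a minimal sketch, assuming the two lists are filled as above (DownloadLibrariesAsync is a hypothetical helper name; Imports System.IO and Imports System.Net are needed at file level):
Public Async Function DownloadLibrariesAsync() As Task
    Using client As New WebClient()
        For i As Integer = 0 To librariesDownloadString.Count - 1
            ' Remote URL comes from the "/"-separated download string.
            Dim url As String = "https://libraries.minecraft.net/" & librariesDownloadString(i)
            ' Local path comes from the "\"-separated launch string.
            Dim localPath As String = Path.Combine(Root, "libraries", librariesLaunchString(i))
            ' Create the target folder before saving the file.
            Directory.CreateDirectory(Path.GetDirectoryName(localPath))
            ' Awaiting each download fetches the files one at a time.
            Await client.DownloadFileTaskAsync(New Uri(url), localPath)
        Next
    End Using
End Function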
Next