DataWeave filtering nested arrays and displaying in descending order - Mule

I'm trying to order an array based on some values nested in its objects.
My data pertains to offers (array) and customers (array) with tickets (array) and other child arrays.
I want to use orderBy to get all the customers' information ordered by the latest timestamp (an attribute in the tickets array).
From the example, offer 1 has customer 50001 with tickets 1001 and 1002, and customer 50002 with tickets 1003 and 1004. I want the customer that has the latest timestamp across all of its tickets to be displayed first (descending order), with all the other customers ordered accordingly.
Request Payload:
{
"count": 1,
"offers": [{
"offerInfo": {
"orderNumber": "1",
"orderCreationDtTime": "2023-01-10 00:00:00"
},
"customers": [{
"customerInfo": {
"name": {
"frstNm": "JOHN",
"lstNm": "DOE"
}
},
"customerNum": "50001",
"tickets": [{
"timestamp": "2023-01-07 00:38:00.167000",
"ticketService": {
"ticketNum": "1001",
"ticketType": "3"
},
"ticketReps": [{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
},
{
"timestamp": "2023-01-11 00:38:00.167000",
"ticketService": {
"ticketNum": "1002",
"ticketType": "3"
},
"ticketReps": [{
"seq": "3",
"comment": "1st",
"location": "US"
},
{
"seq": "4",
"comment": "2nd",
"location": "US"
}
]
}
]
},
{
"customerInfo": {
"name": {
"frstNm": "FAN",
"lstNm": "SING"
}
},
"customerNum": "50002",
"tickets": [{
"timestamp": "2023-01-10 00:38:00.167000",
"ticketService": {
"ticketNum": "1003",
"ticketType": "3"
},
"ticketReps": [{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
},
{
"timestamp": "2023-01-19 00:38:00.167000",
"ticketService": {
"ticketNum": "1004",
"ticketType": "3"
},
"ticketReps": [{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
}
]
}
]
}]
}
Expected payload after the Transform Message:
{
"count": 1,
"offers": [{
"offerInfo": {
"orderNumber": "1",
"orderCreationDtTime": "2023-01-10 00:00:00"
},
"customers": [{
"customerInfo": {
"name": {
"frstNm": "FAN",
"lstNm": "SING"
}
},
"customerNum": "50002",
"tickets": [{
"timestamp": "2023-01-19 00:38:00.167000",
"ticketService": {
"ticketNum": "1004",
"ticketType": "3"
},
"ticketReps": [{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
},
{
"timestamp": "2023-01-10 00:38:00.167000",
"ticketService": {
"ticketNum": "1003",
"ticketType": "3"
},
"ticketReps": [{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
}
]
},
{
"customerInfo": {
"name": {
"frstNm": "JOHN",
"lstNm": "DOE"
}
},
"customerNum": "50001",
"tickets": [{
"timestamp": "2023-01-11 00:38:00.167000",
"ticketService": {
"ticketNum": "1002",
"ticketType": "3"
},
"ticketReps": [{
"seq": "3",
"comment": "1st",
"location": "US"
},
{
"seq": "4",
"comment": "2nd",
"location": "US"
}
]
},
{
"timestamp": "2023-01-07 00:38:00.167000",
"ticketService": {
"ticketNum": "1001",
"ticketType": "3"
},
"ticketReps": [{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
}
]
}
]
}]
}

The script below should do it:
%dw 2.0
output application/json
import * from dw::util::Values
---
payload update ["offers", "customers"] with (
    (($ map (
        $ update {
            case .tickets -> ($ orderBy $.timestamp as LocalDateTime {format: "yyyy-MM-dd HH:mm:ss.SSSSSS"})[-1 to 0]
        }
    )) orderBy $.tickets[0].timestamp as LocalDateTime {format: "yyyy-MM-dd HH:mm:ss.SSSSSS"})[-1 to 0]
)
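The [-1 to 0] index range simply reads an ascending orderBy result back to front, which is the usual DataWeave idiom for a descending sort. A minimal sketch of the idiom on its own, with made-up values:
%dw 2.0
output application/json
---
// orderBy sorts ascending; the [-1 to 0] range reverses the result
([3, 1, 2] orderBy $)[-1 to 0] // [3, 2, 1]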

A solution using the update operator for each step and auxiliary functions for clarity.
%dw 2.0
output application/json
fun convertTimestampToNumber(t) = t as LocalDateTime {format: "yyyy-MM-dd HH:mm:ss.SSSSSS"} as String {format: "yyyyMMddHHmmssSSSSSS"} as Number
fun getMaxTimestamp(t) = max(t map convertTimestampToNumber($.timestamp))
---
payload update {
    case offers at .offers ->
        offers map ($ update {
            case customers at .customers ->
                customers map ($ update {
                    case tickets at .tickets -> tickets orderBy (-convertTimestampToNumber($.timestamp))
                })
                orderBy (-getMaxTimestamp($.tickets))
        })
}
Output:
{
"count": 1,
"offers": [
{
"offerInfo": {
"orderNumber": "1",
"orderCreationDtTime": "2023-01-10 00:00:00"
},
"customers": [
{
"customerInfo": {
"name": {
"frstNm": "FAN",
"lstNm": "SING"
}
},
"customerNum": "50002",
"tickets": [
{
"timestamp": "2023-01-19 00:38:00.167000",
"ticketService": {
"ticketNum": "1004",
"ticketType": "3"
},
"ticketReps": [
{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
},
{
"timestamp": "2023-01-10 00:38:00.167000",
"ticketService": {
"ticketNum": "1003",
"ticketType": "3"
},
"ticketReps": [
{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
}
]
},
{
"customerInfo": {
"name": {
"frstNm": "JOHN",
"lstNm": "DOE"
}
},
"customerNum": "50001",
"tickets": [
{
"timestamp": "2023-01-11 00:38:00.167000",
"ticketService": {
"ticketNum": "1002",
"ticketType": "3"
},
"ticketReps": [
{
"seq": "3",
"comment": "1st",
"location": "US"
},
{
"seq": "4",
"comment": "2nd",
"location": "US"
}
]
},
{
"timestamp": "2023-01-07 00:38:00.167000",
"ticketService": {
"ticketNum": "1001",
"ticketType": "3"
},
"ticketReps": [
{
"seq": "1",
"comment": "1st",
"location": "US"
},
{
"seq": "2",
"comment": "2nd",
"location": "US"
}
]
}
]
}
]
}
]
}
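As a side note, the numeric sort key that convertTimestampToNumber builds can be sanity-checked in isolation; negating it inside orderBy is what flips the sort to descending. A minimal sketch:
%dw 2.0
output application/json
fun convertTimestampToNumber(t) = t as LocalDateTime {format: "yyyy-MM-dd HH:mm:ss.SSSSSS"} as String {format: "yyyyMMddHHmmssSSSSSS"} as Number
---
// "2023-01-19 00:38:00.167000" becomes the number 20230119003800167000
convertTimestampToNumber("2023-01-19 00:38:00.167000")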

Related

Snowflake SQL - Dynamically turning irregular JSON data into "CASE WHEN" SQL clause

I feel compelled to specify from the very beginning that I am a total newbie to Snowflake and to the idea of JSON data tables.
That being said, I find myself faced with the following challenge:
I have a Snowflake table called ANIMALS which holds a JSON object with a semi-irregular structure in the conditions part:
{
"animals": [
{
"name": "TIGER",
"id": 101,
"conditions": {
"and": [
{
"operator": "=",
"attribute": "ANIMAL_FAMILY",
"value": "feline"
},
{
"operator": "IN",
"attribute": "SUBSPECIES",
"value": ["Bengal", "Sumatran", "Siberian"]
},
{
"operator": "=",
"attribute": "ORDER",
"value": "carnivorous"
}
]
}
},
{
"name": "CAT",
"id": 102,
"conditions": {
"and": [
{
"operator": "=",
"attribute": "ANIMAL_FAMILY",
"value": "feline"
},
{
"or": [
{
"operator": "IN",
"attribute": "SUBSPECIES",
"value": ["Abyssinian", "Manx", "Siamese", "Sphynx"]
},
{
"operator": "=",
"attribute": "ENVIRONMENT",
"value": "domestic"
}
]
}
]
}
},
{
"name": "DOG",
"id": 103,
"conditions": {
"or": [
{
"operator": "=",
"attribute": "IS_MOST_BELOVED_PET",
"value": true
},
{
"and": [
{
"operator": "=",
"attribute": "ANIMAL_FAMILY",
"value": "canine"
},
{
"operator": "=",
"attribute": "ENVIRONMENT",
"value": "domestic"
}
]
},
{
"and": [
{
"operator": "=",
"attribute": "ENVIRONMENT",
"value": "domestic"
},
{
"or": [
{
"operator": "C",
"attribute": "ANIMAL_FAMILY",
"value": "%feline%"
},
{
"operator": "IN",
"attribute": "SUBSPECIES",
"value": ["Akita", "Beagle", "Border Collie", "Cane Corso", "Chihuahua"]
}
]
}
]
}
]
}
},
{
"name": "HORSE",
"id": 104,
"conditions": {
"operator": "=",
"attribute": "ANIMAL_FAMILY",
"value": "equine"
}
}
]
}
There is no limit to the degree of nesting of the AND and OR operators, which unfortunately makes the conditions very irregular.
Given this JSON, I need to dynamically generate a CASE WHEN [condition] THEN [value] clause which will look like this:
CASE WHEN ((ANIMAL_FAMILY = 'feline') AND (SUBSPECIES IN ('Bengal', 'Sumatran', 'Siberian')) AND (ORDER = 'carnivorous')) THEN 'TIGER'
WHEN ((ANIMAL_FAMILY = 'feline') AND ((SUBSPECIES IN ('Abyssinian', 'Manx', 'Siamese', 'Sphynx') OR (ENVIRONMENT = 'domestic')))) THEN 'CAT'
WHEN ((IS_MOST_BELOVED_PET = TRUE) OR ((ANIMAL_FAMILY = 'canine') AND (ENVIRONMENT = 'domestic')) OR ((ENVIRONMENT = 'domestic') AND ((ANIMAL_FAMILY NOT LIKE '%feline%') OR (SUBSPECIES IN ('Akita', 'Beagle', 'Border Collie', 'Cane Corso', 'Chihuahua'))))) THEN 'DOG'
WHEN ANIMAL_FAMILY = 'equine' THEN 'HORSE'
END AS animal
Is this doable relying solely on Snowflake queries, with no other third-party programming languages?
Any hints and pointers would be greatly appreciated.
Thank you in advance for your replies.
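Not a pure-Snowflake answer, but to make the recursion this structure demands concrete, here is a minimal DataWeave sketch (the language used elsewhere on this page, not the Snowflake-only solution being asked for). The render helper is hypothetical and only covers =, IN, and nested and/or over string values:
%dw 2.0
output application/json
// recursive renderer: and/or nodes recurse, leaf nodes become predicates
fun render(c) =
    if (c."and"?) "(" ++ ((c."and" map render($)) joinBy " AND ") ++ ")"
    else if (c."or"?) "(" ++ ((c."or" map render($)) joinBy " OR ") ++ ")"
    else if (c.operator == "IN") c.attribute ++ " IN (" ++ ((c.value map "'$($)'") joinBy ", ") ++ ")"
    else c.attribute ++ " " ++ c.operator ++ " '" ++ c.value ++ "'"
---
// the HORSE node, for example, renders to "ANIMAL_FAMILY = 'equine'"
render({ operator: "=", attribute: "ANIMAL_FAMILY", value: "equine" })
Whatever the language, the shape is the same: one function that dispatches on and/or versus leaf and calls itself on the children.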

lodash find object by id across an entire nested object

I would like to find the name value at whatever level when searching by ID.
I assume Lodash would be the best method. I have been searching and testing but with no luck. Is this possible?
{
"delegate": [
{
"id": "2",
"name": "David",
"balances": [],
"responsibility": [
{
"id": "18",
"name": "Lilly",
"balances": []
}
]
},
{
"id": "12",
"name": "Barb",
"balances": [],
"responsibility": [
{
"id": "3",
"name": "Amy",
"balances": []
},
{
"id": "9",
"name": "John",
"balances": []
}
]
}
],
"responsibility": [
{
"id": "14",
"name": "Shawn",
"balances": []
}
]
}

aggregate in mongodb left join with $lookup

I have three collections:
posts=[
{
"id": "p1",
"title": "title 1"
},
{
"id": "p2",
"title": "title 2"
}]
users = [
{
"id": "u1",
"name": "name1"
},
{
"id": "u2",
"name": "name2"
}]
comments = [
{
"userId": "u1",
"postId": "p1",
"comment": "comment 1"
}]
I want to get all the posts, with the comments in each post filtered by userId ("u1"), like this:
posts=[
{
"id": "p1",
"title": "title 1",
"comments":[
"userId": "u1",
"comment": "comment 1"
]
},
{
"id": "p2",
"title": "title 2",
"comments":[]
}]
I used the aggregate function with the $lookup operator, but I don't know how to use the $match operator to filter on userId. My aggregate is below:
self.db.posts.aggregate([
    {
        "$lookup": {
            "from": "comments",
            "localField": "id",
            "foreignField": "postId",
            "as": "comments",
        }
    },
    {
        "$match": {
            "comments.userId": {"$eq": param.objectUserId}
        },
    },
    {"$skip": (param.page - 1) * param.pageSize},
    {"$limit": param.pageSize},
    {"$sort": {"unixDate": pymongo.DESCENDING}}
])
It only returns the one post in the array corresponding to userId = "u1"; the $match stage drops posts that have no matching comments.
Please help me!
Thanks all!
You have to make use of the pipeline option of the $lookup stage and pass the additional conditions that you want to apply.
db.posts.aggregate([
    {
        "$lookup": {
            "from": "comments",
            "let": {
                "pId": "$id"
            },
            "pipeline": [
                {
                    "$match": {
                        "$expr": {
                            "$eq": ["$postId", "$$pId"]
                        },
                        "userId": "u1"
                    }
                },
                {
                    "$project": {
                        "_id": 0,
                        "userId": 1,
                        "comment": 1
                    }
                }
            ],
            "as": "comments"
        }
    }
])
Mongo Playground Sample Execution
self.db.posts.aggregate([
    {
        "$lookup": {
            "from": "comments",
            "let": {
                "pId": "$id"
            },
            "pipeline": [
                {
                    "$match": {
                        "$expr": {
                            "$eq": ["$postId", "$$pId"]
                        },
                        "userId": param.objectUserId
                    }
                },
                {
                    "$project": {
                        "_id": 0,
                        "userId": 1,
                        "comment": 1
                    }
                }
            ],
            "as": "comments"
        }
    },
    # note: stages run in order, so $sort placed after $skip/$limit only sorts
    # the current page; put $sort before $skip/$limit for stable pagination
    {"$skip": (param.page - 1) * param.pageSize},
    {"$limit": param.pageSize},
    {"$sort": {"unixDate": pymongo.DESCENDING}}
])

Problem to fetch data in api call in flutter/dart

How can I fetch the name and team_name keys in this API data?
Condition: the keys 18, 1, 17, etc. are subject codes that change according to the subject and are not fixed; the subjects available can differ in the next API call.
{
"18": {
"detail": {
"id": "18",
"name": "Hindi"
},
"list": [
{
"id": "5",
"team_name": "Gurpreet",
},
{
"id": "2",
"team_name": "Test1",
}
]
},
"17": {
"detail": {
"id": "17",
"name": "Punjabi"
},
"list": [
{
"id": "6",
"team_name": "Guru",
},
{
"id": "3",
"team_name": "Test",
}
]
},
"1": {
"detail": {
"id": "1",
"name": "History"
},
"list": [
{
"id": "7",
"team_name": "Gurpreet",
}
]
},
"19": {
"detail": {
"id": "19",
"name": "Math"
},
"list": [
{
"id": "4",
"team_name": "Gurpreet",
}
]
},
"status": true
}
Use this code. You can use the keys getter to walk the dynamic keys.
void main() {
  var f = {
    "18": {
      "detail": {"id": "18", "name": "Hindi"},
      "list": [
        {"id": "5", "team_name": "Gurpreet"},
        {"id": "2", "team_name": "Test1"}
      ]
    },
    "17": {
      "detail": {"id": "17", "name": "Punjabi"},
      "list": [
        {"id": "6", "team_name": "Guru"},
        {"id": "3", "team_name": "Test"}
      ]
    },
    "1": {
      "detail": {"id": "1", "name": "History"},
      "list": [
        {"id": "7", "team_name": "Gurpreet"}
      ]
    },
    "19": {
      "detail": {"id": "19", "name": "Math"},
      "list": [
        {"id": "4", "team_name": "Gurpreet"}
      ]
    },
    "status": true
  };
  for (var o in f.keys) {
    print(o);
    if (f[o] is bool) {
      // the "status" entry is a plain boolean
      print(f[o]);
    } else {
      // every other entry is a Map holding "detail" and "list"
      if ((f[o] as Map)['detail'] != null) {
        print((f[o] as Map)['detail']['name']);
      }
      if ((f[o] as Map)['list'] != null) {
        // iterate the whole list rather than just index 0
        for (var item in (f[o] as Map)['list']) {
          print(item['team_name']);
        }
      }
    }
  }
}

Compare 2 JSON arrays to get matching and un-matching outputs

I need to compare 2 JSON arrays using Mule 4 DataWeave 2.0 to get matching and non-matching outputs.
The sample input JSON payload is given below:
[
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
]
This has to be compared on the ID/IDENTITY field against the array below.
[
{
"IDENTITY": "D40000",
"NM": "Delta"
},
{
"IDENTITY": "C30000",
"NM": "Charlie"
}
]
My expected output is 2 variable arrays containing the matching and non-matching objects:
varMatch:
[
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
}
]
varUnmatch:
[
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
]
If that ID map is coming from somewhere else and you can't change its structure, I'd probably remap it and then use it like so:
%dw 2.0
output application/json
var identMap = [
    {
        "IDENTITY": "D40000",
        "NM": "Delta"
    },
    {
        "IDENTITY": "C30000",
        "NM": "Charlie"
    }
]
var remapped = identMap reduce ((item, accum = {}) -> accum ++ {(item.IDENTITY): 1})
---
payload groupBy (if (remapped[$.ID]?) "varMatched" else "varUnmatched")
which produces
{
"varUnmatched": [
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
],
"varMatched": [
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
}
]
}
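For what it's worth, remapped is just a lookup object keyed by IDENTITY, which is what makes remapped[$.ID]? a cheap membership test. Evaluated on its own:
%dw 2.0
output application/json
var identMap = [
    {"IDENTITY": "D40000", "NM": "Delta"},
    {"IDENTITY": "C30000", "NM": "Charlie"}
]
---
// builds { "D40000": 1, "C30000": 1 }
identMap reduce ((item, accum = {}) -> accum ++ {(item.IDENTITY): 1})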
Hope this helps
%dw 2.0
output application/json
var input1 = [
    {
        "CODE": "A11",
        "NAME": "Alpha",
        "ID": "C10000"
    },
    {
        "CODE": "B12",
        "NAME": "Bravo",
        "ID": "B20000"
    },
    {
        "CODE": "C11",
        "NAME": "Charlie",
        "ID": "C30000"
    },
    {
        "CODE": "D12",
        "NAME": "Delta",
        "ID": "D40000"
    },
    {
        "CODE": "E12",
        "NAME": "Echo",
        "ID": "E50000"
    }
]
var input2 = [
    {
        "IDENTITY": "D40000",
        "NM": "Delta"
    },
    {
        "IDENTITY": "C30000",
        "NM": "Charlie"
    }
]
var varMatch = input1 filter (input2.IDENTITY contains $.ID)
var varUnmatch = input1 -- varMatch
---
{
    varMatch: varMatch,
    varUnmatch: varUnmatch
}
Sample Output
{
"varMatch": [
{
"CODE": "C11",
"NAME": "Charlie",
"ID": "C30000"
},
{
"CODE": "D12",
"NAME": "Delta",
"ID": "D40000"
}
],
"varUnmatch": [
{
"CODE": "A11",
"NAME": "Alpha",
"ID": "C10000"
},
{
"CODE": "B12",
"NAME": "Bravo",
"ID": "B20000"
},
{
"CODE": "E12",
"NAME": "Echo",
"ID": "E50000"
}
]
}
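One thing to note about this approach: the -- operator removes elements by value equality, so varUnmatch works because varMatch contains the exact objects taken from input1. A minimal sketch of that behavior, with made-up values:
%dw 2.0
output application/json
---
// -- removes elements that compare equal by value, objects included
[{a: 1}, {a: 2}, {a: 3}] -- [{a: 2}] // [{"a": 1}, {"a": 3}]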
Here's another solution, although the previous two already solve your problem:
%dw 2.0
output application/dw
var data = [
    {
        "CODE": "A11",
        "NAME": "Alpha",
        "ID": "C10000"
    },
    {
        "CODE": "B12",
        "NAME": "Bravo",
        "ID": "B20000"
    },
    {
        "CODE": "C11",
        "NAME": "Charlie",
        "ID": "C30000"
    },
    {
        "CODE": "D12",
        "NAME": "Delta",
        "ID": "D40000"
    },
    {
        "CODE": "E12",
        "NAME": "Echo",
        "ID": "E50000"
    }
]
var searchData = [
    {
        "IDENTITY": "D40000",
        "NM": "Delta"
    },
    {
        "IDENTITY": "C30000",
        "NM": "Charlie"
    }
]
---
data dw::core::Arrays::partition (e) -> searchData.*IDENTITY contains e.ID
Pick the one that performs the best and use it.
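Note that partition returns an object with success and failure keys (elements matching the condition land in success), so both arrays come out of a single pass. A minimal sketch of its shape, with made-up values:
%dw 2.0
import partition from dw::core::Arrays
output application/json
---
// yields { "success": [4, 5], "failure": [1, 2, 3] }
[1, 2, 3, 4, 5] partition (e) -> e > 3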