Querydsl: using PostgreSQL's VALUES (SQL)

Can I translate this sql into querydsl form?
select count(ppe),v.name
from personal_progress_entity ppe left join user_detail_entity ude
on ppe.student_entity_id=ude.user_id
right join (values ('aaa'),('bbb'),('ccc'),('ddd')) as v(name)
on ude.people_category=v.name
group by v.name;

The PostgreSQL VALUES list is not supported by Querydsl. However, you can get the same result using a UNION.
CREATE TABLE personal_progress_entity(student_entity_id INTEGER);
INSERT INTO personal_progress_entity VALUES (1);
CREATE TABLE user_detail_entity(user_id INTEGER, people_category VARCHAR);
INSERT INTO user_detail_entity VALUES (1, 'aaa');
INSERT INTO user_detail_entity VALUES (1, 'bbb');
SELECT COUNT(personal_progress_entity.student_entity_id),
name.value_alias
FROM personal_progress_entity personal_progress_entity
LEFT JOIN user_detail_entity user_detail_entity ON personal_progress_entity.student_entity_id = user_detail_entity.user_id
RIGHT JOIN ((SELECT 'aaa' AS value_alias)
UNION
(SELECT 'bbb' AS value_alias)
UNION
(SELECT 'ccc' AS value_alias)
UNION
(SELECT 'ddd' AS value_alias)) AS name
ON name.value_alias = user_detail_entity.people_category
GROUP BY name.value_alias;
Gives:
1 "aaa"
1 "bbb"
0 "ddd"
0 "ccc"
Here's my querydsl-sql implementation. I've added the private static <T> Union<T> union(PathBuilder<T> pathBuilder, T... values) method to reduce boilerplate.
public List<Tuple> stackoverflowAnswer() {
    PathBuilder<String> valueAlias = new PathBuilder<>(String.class, "value_alias");
    PathBuilder<String> name = new PathBuilder<>(String.class, "name");
    return query().select(personalProgressEntity.studentEntityId.count(), name.get(valueAlias))
            .from(personalProgressEntity)
            .leftJoin(userDetailEntity).on(personalProgressEntity.studentEntityId.eq(userDetailEntity.userId))
            .rightJoin(union(valueAlias, "aaa", "bbb", "ccc", "ddd"), name).on(name.get(valueAlias).eq(userDetailEntity.peopleCategory))
            .groupBy(name.get(valueAlias))
            .fetch();
}

private static <T> Union<T> union(PathBuilder<T> pathBuilder, T... values) {
    return SQLExpressions.union(
            Stream.of(values)
                  .map(p -> SQLExpressions.select(Expressions.constantAs(p, pathBuilder)))
                  .collect(Collectors.toList()));
}

Related

create a group of linked items

There is a list of users who buy different product items. I want to group the items by user buying behavior: if any user buys two products, those products shall be in the same group. The purchase links the products.
user  item
1     cat food
1     cat toy
2     cat toy
2     cat snacks
10    dog food
10    dog collar
11    dog food
11    candy
12    candy
12    apples
15    paper
In this sample case all cat items shall be grouped together: "cat food", "cat toy" and "cat snacks". The dog items, candy and apples should form one group, because user purchases link them. The paper is another group.
There are about 200 different products in the table and I need to do a disjoint-set union (DSU).
In JavaScript there are several implementations of Disjoint Set Union (DSU); here one of them is used as a user-defined function (UDF) in BigQuery. The main idea is to use a find and a union function and to save the links in a tree, represented as an array; please see here for details.
create temp function DSU(A array<struct<a string, b string>>)
returns array<struct<a string, b string>>
language js as
"""
// https://gist.github.com/KSoto/3300322fc2fb9b270dce2bf1e3d80cf3
// Disjoint-set bigquery
class DSU {
  constructor() {
    this.parents = [];
  }
  find(x) {
    if (typeof this.parents[x] != "undefined") {
      if (this.parents[x] < 0) {
        // negative entry marks a root
        return x;
      } else {
        if (this.parents[x] != x) {
          this.parents[x] = this.find(this.parents[x]);  // path compression
        }
        return this.parents[x];
      }
    } else {
      this.parents[x] = -1;
      return x;
    }
  }
  union(x, y) {
    var xpar = this.find(x);
    var ypar = this.find(y);
    if (xpar != ypar) {
      this.parents[xpar] += this.parents[ypar];  // accumulate (negative) component size
      this.parents[ypar] = xpar;
    }
  }
  console_print() {
    // console.log(this.parents);
  }
}
var dsu = new DSU();
for (var i in A) {
  dsu.union(A[i].a, A[i].b);
}
var out = [];
for (var i in A) {
  out[i] = {b: dsu.find(A[i].a), a: A[i].a};
}
return out;
""";
with #recursive
your_table as (
  select 1 as user, "cat food" as item
  union all select 1, "cat toy"
  union all select 2, "cat snacks"
  union all select 2, "cat toy"
  union all select 10, "dog food"
  union all select 10, "dog collar"
  union all select 11, "dog food"
  union all select 11, "candy"
  union all select 12, "candy"
  union all select 12, "apples"
  union all select 15, "paper"
), helper as (
  select distinct a, b
  from (
    select user, min(item) as b, array_agg(item) as a_list
    from your_table
    group by 1
  ), unnest(a_list) as a
)
select * except(tmp_count),
  first_value(item) over(partition by b order by tmp_count desc, b) as item_most_common
from
(
  select *,
    count(item) over(partition by b, item) as tmp_count
  from your_table
  left join (
    select X.a, min(X.b) as b
    from (select DSU(array_agg(struct('' || a, '' || b))) as X from helper), unnest(X) X
    group by 1
    order by 1
  ) as combinder
  on '' || item = combinder.a
)
The data is in the table your_table. A helper table is used to build all pairs of items that any user bought. Combined into an array, this is given to the UDF DSU. The function returns all items in column a and the group (its representative root) in column b. We want the most common item of the group to be shown as the group name, therefore some window functions are used to determine it.
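To make the intermediate step concrete, here is a small illustration (my sketch, not part of the original answer) of the (a, b) pairs the helper CTE produces: every item of a user is paired with that user's alphabetically first item, and DSU() then merges items that share a pair.
-- Hypothetical illustration of helper's output for the sample data above:
-- user 1 contributes ("cat toy", "cat food") and ("cat food", "cat food"),
-- user 11 contributes ("dog food", "candy"), user 12 ("candy", "apples"), etc.
select "cat toy" as a, "cat food" as b
union all select "cat food", "cat food"
union all select "dog food", "candy"
union all select "candy", "apples"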

SQL Server: replace chars in string with values in table

How can I replace values in a string with values that are in a table?
For example:
select *
into #t
from
(
    select 'bla' c1, '' c2 union all
    select 'table' c1, 'TABLE' c2 union all
    select 'value' c1, '000' c2 union all
    select '...' c1, '' c2
) t1
declare @s nvarchar(max) = 'this my string and i want to replace all values that are in table #t'
I have some values in my table and I want to replace c1 with c2 in my string.
The result should be:
this my string and i want to replace all 000 that are in TABLE #t
UPDATE:
I solved it with a CLR function:
using System;
using Microsoft.SqlServer.Server;
using System.Data.SqlTypes;
using System.Data.Linq;

namespace ReplaceValues
{
    public partial class Functions
    {
        [SqlFunction
        (
            //DataAccess = DataAccessKind.Read,
            SystemDataAccess = SystemDataAccessKind.Read
        )]
        public static string ReplaceValues(string row, string delimitator, string values, string replace/*, bool CaseSensitive*/)
        {
            //return row;
            string[] tmp_values = values.Split(new string[] { delimitator }, StringSplitOptions.None);
            string[] tmp_replace = replace.Split(new string[] { delimitator }, StringSplitOptions.None);
            row = row.ToUpper();
            for (int i = 0; i < Math.Min(tmp_values.Length, tmp_replace.Length); i++)
            {
                row = row.Replace(tmp_values[i].ToUpper(), tmp_replace[i]);
            }
            return row;
        }
    }
}
and then
select *
into #t
from
(
    select 'value1' OldValue, 'one' NewValue union all
    select 'value2' OldValue, 'two' NewValue union all
    select 'value3' OldValue, 'three' NewValue union all
    select 'value4' OldValue, 'four' NewValue
) t1

select dbo.ReplaceValues(t1.column, '|', t2.v, t2.r)
from MyTable t1
cross apply
(
    select dbo.inlineaggr(i1.OldValue, '|', 1, 1) v,
           dbo.inlineaggr(i1.NewValue, '|', 1, 1) r
    from #t i1
) t2
I have to improve it to handle case sensitivity better, but performance is not bad.
(Also, 'inlineaggr' is a CLR aggregate I wrote years ago.)
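As a side note (my addition, not part of the original answer): on SQL Server 2017 or later the custom 'inlineaggr' aggregate could probably be replaced by the built-in STRING_AGG, roughly like this untested sketch, assuming the same #t and MyTable layout as above:
select dbo.ReplaceValues(t1.column, '|', t2.v, t2.r)
from MyTable t1
cross apply
(
    -- order both lists the same way so OldValue/NewValue positions stay aligned
    select string_agg(i1.OldValue, '|') within group (order by i1.OldValue) as v,
           string_agg(i1.NewValue, '|') within group (order by i1.OldValue) as r
    from #t i1
) t2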
You can do this via recursion. Assuming you have a table of find-replace pairs, you can number the rows and then use a recursive CTE:
create table #t(c1 nvarchar(100), c2 nvarchar(100));
insert into #t(c1, c2) values
('bla', ''),
('table', 'TABLE'),
('value', '000'),
('...', '');

declare @s nvarchar(max) = 'this my string and i want to replace all values that are in table #t';

with ncte as (
    select row_number() over (order by (select null)) as rn, *
    from #t
), rcte as (
    select rn, replace(@s, c1, c2) as newstr
    from ncte
    where rn = 1
    union all
    select ncte.rn, replace(rcte.newstr, ncte.c1, ncte.c2)
    from ncte
    join rcte on ncte.rn = rcte.rn + 1
)
select *
from rcte
where rn = 4
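A small variation (my sketch, not from the original answer): instead of hard-coding rn = 4, you can keep just the last recursion step so the query still works when the number of find-replace pairs changes. This would replace the final SELECT inside the same WITH block:
-- Replace the final SELECT above with this to avoid the hard-coded rn = 4.
select top (1) newstr
from rcte
order by rn desc;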

How to set more than one filter over JSON data using JSON_VALUE?

I'm trying to write a query to get data from a collection of JSON objects:
create table test (LINE_SPECS nvarchar(max));
insert into test values (N'
{
"lineName":"GHjr",
"pipeDiameter":"12",
"pipeLength":"52000",
"pressure":"15",
"volume":"107"
},
{
"lineName":"Ks3R",
"pipeDiameter":"9",
"pipeLength":"40000",
"pressure":"15",
"volume":"80"
}
');
Now, getting the lineName of the first object (lineName: GHjr) works:
select
JSON_VALUE(LINE_SPECS, '$.lineName') as line_name
, JSON_VALUE(LINE_SPECS, '$.pipeDiameter') as diameter
from test
WHERE JSON_VALUE(LINE_SPECS, '$.lineName') = 'GHjr'
;
but that is not possible when I try to get the second one, "Ks3R":
select
JSON_VALUE(LINE_SPECS, '$.lineName') as line_name
, JSON_VALUE(LINE_SPECS, '$.pipeDiameter') as diameter
from test
WHERE JSON_VALUE(LINE_SPECS, '$.lineName') = 'Ks3R'
How can I do that?
Thanks.
First, your JSON data isn't valid; it should be an array, like this:
create table test (LINE_SPECS nvarchar(max));
insert into test values (N'
[
{
"lineName":"GHjr",
"pipeDiameter":"12",
"pipeLength":"52000",
"pressure":"15",
"volume":"107"
},
{
"lineName":"Ks3R",
"pipeDiameter":"9",
"pipeLength":"40000",
"pressure":"15",
"volume":"80"
}
]');
You can use OPENJSON with CROSS APPLY to parse the JSON array and query it:
select
t2.*
from test t1
CROSS APPLY
OPENJSON(t1.LINE_SPECS)
WITH
(
line_name varchar(MAX) N'$.lineName',
diameter varchar(MAX) N'$.pipeDiameter'
) AS t2
WHERE line_name = 'Ks3R'
sqlfiddle
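As an aside (my addition, assuming the corrected array above): if you only need an element by its position, JSON_VALUE also accepts an array index in the path.
-- Sketch: read the second array element directly by position.
select
    JSON_VALUE(LINE_SPECS, '$[1].lineName') as line_name,
    JSON_VALUE(LINE_SPECS, '$[1].pipeDiameter') as diameter
from test;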

Parent child hierarchy path without using CTE

Hi, I have the following tables:
create table Features
(
FeatureId bigint,
FeatureName varchar(255),
ParentId bigint
)
insert into Features values(10, 'Feature 1', 1);
insert into Features values(11, 'Feature 2', 10);
insert into Features values(12, 'Feature 3', 11);
insert into Features values(13, 'Feature 4', 2);
insert into Features values(14, 'Feature 5', 13);
insert into Features values(15, 'Feature 6', 3);
insert into Features values(16, 'Feature 7', 15);
insert into Features values(17, 'Feature 8', 16);
insert into Features values(18, 'Feature 9', 17);
insert into Features values(19, 'Feature 10', 18);
insert into Features values(20, 'Feature 11', 19);
insert into Features values(21, 'Feature 12', 12);
create table Scenarios
(
ScenarioId bigint,
ParentId bigint,
ScenarioTitle varchar(25)
)
insert into Scenarios values(1, 0, 'Scenario 1')
insert into Scenarios values(2, 0, 'Scenario 2')
insert into Scenarios values(3, 0, 'Scenario 3')
Here, a feature can have either another feature or a scenario as its parent. For a scenario, the parent id can be either 0 or another scenario.
I would like to get the path of each feature as follows:
FeatureId  ParentId  FeatureName  PathString        PathLength
10         1         Feature 1    1                 0
11         10        Feature 2    1/10              1
12         11        Feature 3    1/10/11           2
13         2         Feature 4    2                 0
14         13        Feature 5    2/13              1
15         3         Feature 6    3                 0
16         15        Feature 7    3/15              1
17         16        Feature 8    3/15/16           2
18         17        Feature 9    3/15/16/17        3
19         18        Feature 10   3/15/16/17/18     4
20         19        Feature 11   3/15/16/17/18/19  5
21         12        Feature 12   1/10/11/12        3
Since I would like to collect this result in a temp table for further processing, I tried SELECT INTO, and Azure SQL DW throws: "Using SELECT INTO statement is not supported in Parallel Data Warehouse. Modify the statement and re-try executing it."
Here is my query (it may not be in great shape, as I am still figuring out recursive SQL):
drop table FeaturesWithPath;
;WITH FeaturePaths (FeatureId, ParentId, FeatureName, PathString)
AS
(
-- Anchor member definition
SELECT g.FeatureId, g.ParentId, g.FeatureName, cast(CAST(g.ParentId as nvarchar(max)) as varchar(max)) as PathString
FROM dbo.Features AS g
UNION ALL
-- Recursive member definition
SELECT g.FeatureId, g.ParentId, g.FeatureName, PathString + '/' + cast(g.ParentId as varchar(max))
FROM dbo.Features AS g
INNER JOIN FeaturePaths AS gp
ON g.ParentId = gp.FeatureId
)
SELECT FeatureId, ParentId, FeatureName, PathString into FeaturesWithPath FROM FeaturePaths;
--select * from FeaturesWithPath order by FeatureId;
drop table FeaturesWithPathLength;
select *, LEN(PathString) - LEN(REPLACE(PathString, '/', '')) as PathLength into FeaturesWithPathLength from FeaturesWithPath
--select * from FeaturesWithPathLength order by FeatureId
drop table MaxFeaturePathLenghtRowTable;
select * into MaxFeaturePathLenghtRowTable
from FeaturesWithPathLength
where PathLength = (select max(PathLength) from FeaturesWithPathLength as f where f.FeatureId = FeaturesWithPathLength.FeatureId)
or PathLength = (select max(PathLength) from FeaturesWithPathLength as f where f.FeatureId = FeaturesWithPathLength.FeatureId
and PathLength > (select max(PathLength) from FeaturesWithPathLength as f2 where f2.FeatureId = FeaturesWithPathLength.FeatureId));
--select * from MaxFeaturePathLenghtRowTable order by FeatureId
drop table FeaturesPerParentTable
select FeatureId, [value] as NewParentId, FeatureName, COALESCE(NULLIF(SUBSTRING(PathString, 0, CHARINDEX('/', PathString)), ''), [value]) AS ScenarioId into FeaturesPerParentTable
from MaxFeaturePathLenghtRowTable
cross apply STRING_SPLIT (PathString, '/') cs order by FeatureId
select * from FeaturesPerParentTable order by FeatureId;
I tried to convert the CTE to use CTAS which did not work either.
This is how I tried CTAS:
;WITH FeaturePaths (FeatureId, ParentId, FeatureName, PathString)
AS
(
-- Anchor member definition
SELECT g.FeatureId, g.ParentId, g.FeatureName, cast(CAST(g.ParentId as nvarchar(max)) as varchar(max)) as PathString
FROM dbo.Features AS g
--WHERE parentId=0
UNION ALL
-- Recursive member definition
SELECT g.FeatureId, g.ParentId, g.FeatureName, PathString + '/' + cast(g.ParentId as varchar(max))
FROM dbo.Features AS g
INNER JOIN FeaturePaths AS gp
ON g.ParentId = gp.FeatureId
)
CREATE TABLE #tmp_fct
WITH
(
DISTRIBUTION = ROUND_ROBIN
)
AS
SELECT FeatureId, ParentId, FeatureName, PathString
FROM FeaturePaths;
Now I am wondering if there is a way to get the path for each Feature on Azure SQL DW and collect the result into a table.
-- UPDATE --
For a solution in SQL, see this.
Here is a solution in C#:
void Main()
{
    var scenarios = new List<Scenario> {
        new Scenario{Id = 1, Title = "Scenario 1", ParentId = 0},
        new Scenario{Id = 2, Title = "Scenario 2", ParentId = 0},
        new Scenario{Id = 3, Title = "Scenario 3", ParentId = 0},
    };
    var features = new List<Feature> {
        new Feature{Id = 10, Title = "Feature 1", ParentId = 1},
        new Feature{Id = 11, Title = "Feature 2", ParentId = 10},
        new Feature{Id = 12, Title = "Feature 3", ParentId = 11},
        new Feature{Id = 13, Title = "Feature 4", ParentId = 2},
        new Feature{Id = 14, Title = "Feature 5", ParentId = 13},
        new Feature{Id = 15, Title = "Feature 6", ParentId = 3},
        new Feature{Id = 16, Title = "Feature 7", ParentId = 15},
        new Feature{Id = 17, Title = "Feature 8", ParentId = 16},
        new Feature{Id = 18, Title = "Feature 9", ParentId = 17},
        new Feature{Id = 19, Title = "Feature 10", ParentId = 18},
        new Feature{Id = 20, Title = "Feature 11", ParentId = 19},
        new Feature{Id = 21, Title = "Feature 12", ParentId = 12}
    };
    var scenarioIds = new HashSet<long>(scenarios.Select(x => x.Id));
    // get path
    IList<Feature> withPath = features.Select(x => { x.Path = GetPath(x, features, scenarioIds); return x; }).ToList().Dump("With path");
}

private string GetPath(Feature f, IList<Feature> features, HashSet<long> scenarioIds)
{
    if (scenarioIds.Contains(f.ParentId))
    {
        return f.ParentId.ToString();
    }
    else
    {
        var parent = features.First(d => d.Id == f.ParentId);
        return GetPath(parent, features, scenarioIds) + "/" + f.ParentId;
    }
}

public class Scenario
{
    public long Id { get; set; }
    public string Title { get; set; }
    public long ParentId { get; set; }
}

public class Feature
{
    public long Id { get; set; }
    public string Title { get; set; }
    public long ParentId { get; set; }
    public string Path { get; set; } // temp
}
As Azure SQL Data Warehouse does not support recursive CTEs or cursors at this time, you could do this with a good old-fashioned loop, eg:
-- Loop thru Features
DECLARE @counter INT = 1;
-- Insert first record where no parent exists
IF OBJECT_ID('tempdb..#features') IS NOT NULL DROP TABLE #features;
CREATE TABLE #features
WITH
(
DISTRIBUTION = HASH ( FeatureId ),
LOCATION = USER_DB
)
AS
WITH cte AS
(
SELECT 1 AS xlevel, p.FeatureId, p.ParentId, p.FeatureName, CAST( p.ParentId AS VARCHAR(255) ) AS PathString, 0 AS PathLength
FROM dbo.Features p
WHERE NOT EXISTS
(
SELECT *
FROM dbo.Features c
WHERE p.ParentId = c.FeatureId
)
)
SELECT *
FROM cte;
SELECT 'before' s, * FROM #features ORDER BY FeatureId;
-- Loop recursively through the child records
WHILE EXISTS (
SELECT *
FROM #features p
INNER JOIN dbo.features c ON p.FeatureId = c.ParentId
WHERE p.xlevel = @counter
)
BEGIN
-- Insert next level
INSERT INTO #features ( xlevel, FeatureId, ParentId, FeatureName, PathString, PathLength )
SELECT @counter + 1 AS xlevel, c.FeatureId, c.ParentId, c.FeatureName, p.PathString + '/' + CAST( c.ParentId AS VARCHAR(255) ) AS PathString, @counter AS PathLength
FROM #features p
INNER JOIN dbo.features c ON p.FeatureId = c.ParentId
WHERE p.xlevel = @counter;
SET @counter += 1;
-- Loop safety
IF @counter > 99
BEGIN
RAISERROR( 'Too many loops!', 16, 1 )
BREAK
END;
END
SELECT 'after' s, * FROM #features ORDER BY FeatureId;
Full code including setup is available here.
My results:
Hope that helps.
Why not create the FeaturesWithPath table beforehand and insert into it using the following pseudocode?
CREATE TABLE FeaturesWithPath (FeatureId type, ParentId type, FeatureName type, PathString type)
;with FeaturePaths (FeatureId, ParentId, FeatureName, PathString)
AS
(
-- Anchor member definition
SELECT g.FeatureId, g.ParentId, g.FeatureName, cast(CAST(g.ParentId as nvarchar(max)) as varchar(max)) as PathString
FROM dbo.Features AS g
UNION ALL
-- Recursive member definition
SELECT g.FeatureId, g.ParentId, g.FeatureName, PathString + '/' + cast(g.ParentId as varchar(max))
FROM dbo.Features AS g
INNER JOIN FeaturePaths AS gp
ON g.ParentId = gp.FeatureId
)
insert FeaturesWithPath (FeatureId, ParentId, FeatureName, PathString)
SELECT FeatureId, ParentId, FeatureName, PathString FROM FeaturePaths;

Update multiple rows in same query using PostgreSQL

I'm looking to update multiple rows in PostgreSQL in one statement. Is there a way to do something like the following?
UPDATE table
SET
column_a = 1 where column_b = '123',
column_a = 2 where column_b = '345'
You can also use update ... from syntax and use a mapping table. If you want to update more than one column, it's much more generalizable:
update test as t set
column_a = c.column_a
from (values
('123', 1),
('345', 2)
) as c(column_b, column_a)
where c.column_b = t.column_b;
You can add as many columns as you like:
update test as t set
column_a = c.column_a,
column_c = c.column_c
from (values
('123', 1, '---'),
('345', 2, '+++')
) as c(column_b, column_a, column_c)
where c.column_b = t.column_b;
sql fiddle demo
Based on @Roman's solution, you can set multiple values:
update users as u set -- postgres FTW
email = u2.email,
first_name = u2.first_name,
last_name = u2.last_name
from (values
(1, 'hollis@weimann.biz', 'Hollis', 'Connell'),
(2, 'robert@duncan.info', 'Robert', 'Duncan')
) as u2(id, email, first_name, last_name)
where u2.id = u.id;
Yes, you can:
UPDATE foobar SET column_a = CASE
WHEN column_b = '123' THEN 1
WHEN column_b = '345' THEN 2
END
WHERE column_b IN ('123','345')
And working proof: http://sqlfiddle.com/#!2/97c7ea/1
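One note I'd add (not part of the original answer): a CASE expression without an ELSE returns NULL for non-matching rows, so if you drop the WHERE filter, add an ELSE branch that keeps the current value, roughly like this sketch:
UPDATE foobar SET column_a = CASE
    WHEN column_b = '123' THEN 1
    WHEN column_b = '345' THEN 2
    ELSE column_a   -- leave all other rows unchanged
END;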
For updating multiple rows in a single query, you can try this
UPDATE table_name
SET
column_1 = CASE WHEN any_column = value and any_column = value THEN column_1_value end,
column_2 = CASE WHEN any_column = value and any_column = value THEN column_2_value end,
column_3 = CASE WHEN any_column = value and any_column = value THEN column_3_value end,
.
.
.
column_n = CASE WHEN any_column = value and any_column = value THEN column_n_value end
If you don't need the additional condition, remove the 'and' part of this query.
Let's say you have an array of IDs and an equivalent array of statuses. Here is an example of how to do this with static SQL (a query whose text doesn't change for different values) using the arrays:
drop table if exists results_dummy;
create table results_dummy (id int, status text, created_at timestamp default now(), updated_at timestamp default now());
-- populate table with dummy rows
insert into results_dummy
(id, status)
select unnest(array[1,2,3,4,5]::int[]) as id, unnest(array['a','b','c','d','e']::text[]) as status;
select * from results_dummy;
-- THE update of multiple rows with/by different values
update results_dummy as rd
set status=new.status, updated_at=now()
from (select unnest(array[1,2,5]::int[]) as id,unnest(array['a`','b`','e`']::text[]) as status) as new
where rd.id=new.id;
select * from results_dummy;
-- in code using **IDs** as first bind variable and **statuses** as the second bind variable:
update results_dummy as rd
set status=new.status, updated_at=now()
from (select unnest(:1::int[]) as id,unnest(:2::text[]) as status) as new
where rd.id=new.id;
I came across a similar scenario and the CASE expression was useful to me.
UPDATE reports SET is_default =
case
when report_id = 123 then true
when report_id != 123 then false
end
WHERE account_id = 321;
Here reports is a table, and account_id is the same for the report_ids mentioned above. The above query sets one record (the one matching the condition) to true and all the non-matching ones to false.
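As a side note (my addition, assuming is_default is a boolean column): the same effect can be written more compactly by assigning the comparison itself.
UPDATE reports
SET is_default = (report_id = 123)  -- true for the matching row, false otherwise
WHERE account_id = 321;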
The answer provided by @zero323 works great on Postgres 12. In case someone has multiple values for column_b (referred to in the OP's question):
UPDATE conupdate SET orientation_status = CASE
when id in (66934, 39) then 66
when id in (66938, 49) then 77
END
WHERE id IN (66934, 39, 66938, 49)
In the above query, id is analogous to column_b; orientation_status is analogous to column_a of the question.
In addition to the other answers, comments and documentation: the datatype cast can be placed at the point of usage, which allows easier copy-pasting:
update test as t set
    column_a = c.column_a::numeric
from (values
    ('123', 1),
    ('345', 2)
) as c(column_b, column_a)
where t.column_b = c.column_b::text;
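Equivalently (my sketch, not from the original answer), the casts can sit inside the VALUES list itself, so the derived columns already carry the intended types:
update test as t set
    column_a = c.column_a
from (values
    ('123'::text, 1::int),
    ('345'::text, 2::int)
) as c(column_b, column_a)
where t.column_b = c.column_b;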
@Roman, thank you for the solution. For anyone using Node, I made this utility method to pump out a query string that updates n columns for n records.
Sadly, it only handles records that all share the same columns, so the recordRows param is pretty strict.
const payload = {
  rows: [
    { id: 1, ext_id: 3 },
    { id: 2, ext_id: 3 },
    { id: 3, ext_id: 3 },
    { id: 4, ext_id: 3 }
  ]
};

var result = updateMultiple('t', payload);
console.log(result);
/*
qstring returned is:
UPDATE t AS t SET id = c.id, ext_id = c.ext_id FROM (VALUES (1,3),(2,3),(3,3),(4,3)) AS c(id,ext_id) WHERE c.id = t.id
*/

function updateMultiple(table, recordRows) {
  var valueSets = new Array();
  var cSet = new Set();
  var columns = new Array();
  for (const [key, value] of Object.entries(recordRows.rows)) {
    var groupArray = new Array();
    for (const [key2, value2] of Object.entries(recordRows.rows[key])) {
      if (!cSet.has(key2)) {
        cSet.add(`${key2}`);
        columns.push(key2);
      }
      groupArray.push(`${value2}`);
    }
    valueSets.push(`(${groupArray.toString()})`);
  }
  var valueSetsString = valueSets.join();
  var setMappings = new String();
  for (var i = 0; i < columns.length; i++) {
    var fieldSet = columns[i];
    setMappings += `${fieldSet} = c.${fieldSet}`;
    if (i < columns.length - 1) {
      setMappings += ', ';
    }
  }
  var qstring = `UPDATE ${table} AS t SET ${setMappings} FROM (VALUES ${valueSetsString}) AS c(${columns}) WHERE c.id = t.id`;
  return qstring;
}
I don't think the accepted answer is entirely correct: it is order dependent. Here is an example that will not work correctly with the approach from that answer.
create table xxx (
id varchar(64),
is_enabled boolean
);
insert into xxx (id, is_enabled) values ('1',true);
insert into xxx (id, is_enabled) values ('2',true);
insert into xxx (id, is_enabled) values ('3',true);
UPDATE public.xxx AS pns
SET is_enabled = u.is_enabled
FROM (
    VALUES
        ('3', false),
        ('1', true),
        ('2', false)
) AS u(id, is_enabled)
WHERE u.id = pns.id;
select * from xxx;
So the question still stands: is there a way to do it in an order-independent way?
---- After trying a few things, this seems to be order independent:
UPDATE public.xxx AS pns
SET is_enabled = u.is_enabled
FROM (
SELECT '3' as id, false as is_enabled UNION
SELECT '1' as id, true as is_enabled UNION
SELECT '2' as id, false as is_enabled
) as u
WHERE u.id = pns.id;