comments_table
-- Threaded post comments: comment_parent self-references comment_id to
-- form a reply tree (NULL = root comment).
create table "Comments"
(
comment_id serial
primary key,
post_id integer
references "Posts"
on update cascade on delete cascade,
user_id integer
references "Users"
on update cascade on delete cascade,
comment text,
"comment_likeCount" integer,
comment_pinned boolean,
comment_approved boolean,
-- NOTE(review): no FK back to "Comments"(comment_id); presumably intended — confirm
comment_parent integer,
"commentCreatedAt" timestamp with time zone not null,
-- NOTE(review): "commnet" is a typo for "comment"; renaming requires migrating every caller
"commnetUpdatedAt" timestamp with time zone not null
);
query
-- Attach the array of direct-reply ids (and its length) to each root comment.
-- Fixes vs. the original:
--   * dropped RECURSIVE — the CTE never references itself;
--   * removed the dead commented-out filter;
--   * moved COALESCE to the outer query: array_agg over a GROUP BY group can
--     never return NULL, so the original COALESCE inside the CTE was dead code,
--     while the LEFT JOIN here is what actually produces NULLs for childless roots;
--   * replaced "select *" with an explicit list — the old form duplicated the
--     CTE's comment_parent column, which is why the JSON output showed
--     comment_parent equal to comment_id for root comments.
with replies_per_parent as (
    select comment_parent,
           array_agg(comment_id) as replies
    from "Comments"
    where comment_parent is not null
    group by comment_parent
)
select c.*,
       coalesce(r.replies, array[]::integer[]) as replies,
       coalesce(array_length(r.replies, 1), 0) as repliescount
from "Comments" c
left join replies_per_parent r
       on c.comment_id = r.comment_parent
where c.comment_parent is null;
output
[
{
"comment_id": 20,
"post_id": 1,
"user_id": 23,
"comment": "second root comment",
"comment_likeCount": 0,
"comment_pinned": true,
"comment_approved": true,
"comment_parent": 20,
"commentCreatedAt": "2022-07-22 21:44:00.526000 +00:00",
"commnetUpdatedAt": "2022-07-22 21:44:01.374000 +00:00",
"replies": [22, 23],
"repliescount": 2
},
{
"comment_id": 1,
"post_id": 1,
"user_id": 23,
"comment": "this a test comment",
"comment_likeCount": 0,
"comment_pinned": true,
"comment_approved": false,
"comment_parent": 1,
"commentCreatedAt": "2022-07-13 16:09:42.909000 +00:00",
"commnetUpdatedAt": "2022-07-13 16:09:42.909000 +00:00",
"replies": [11, 15, 17],
"repliescount": 3
}
]
My question is: how can I limit the number of replies returned per root comment?
For example: suppose my database has 100 root comments and each root comment has more than 2,000 replies — then this becomes a really expensive query. If I were able to limit/paginate the number of replies, it would really reduce the load.
Like YouTube, where you first fetch the root comments, and each root comment has a "load replies" button that loads its replies — but not all of them at once.
Related
I currently have a table that contains a content_id, root_id, parent_id and content_level. This table is self-referencing, in which a record could have related child records. The parent records do not know about the child records but the child record know about the parents via the parent_id field.
This is the query used for fetching all the records with the root content at the top. The root content has content_level = 0, and both root_id and parent_id = NULL. For the rest of the records, the root_id field will match the content_id of root record.
-- Fetch the root record (content_id = 138412032) followed by every
-- descendant; ordering by content_level puts the root (level 0) first.
SELECT root.*
FROM jccontent AS root
WHERE root.content_id = 138412032
UNION ALL
(
    SELECT child.*
    FROM jccontent AS parent
    INNER JOIN jccontent AS child
        ON child.parent_id = parent.content_id
    WHERE child.root_id = 138412032
)
ORDER BY content_level;
From here, I would like to build a JSON tree structure where it will contain the root as the top element, and then nested children elements that follows. I would like to complete this portion using purely SQL. Currently I have done it in code and it works well, but would like to see if doing it in SQL will be better.
My desired output would be something like this:
{
"content_id": 138412032,
"root_id": null,
"parent_id": null,
"content_level": 0,
"children": [
{
"content_id": 1572864000,
"root_id": 138412032,
"parent_id": 138412032,
"content_level": 1,
"children": [
{
"content_id": 1606418432,
"root_id": 138412032,
"parent_id": 1572864000,
"content_level": 2,
"children": []
},
{
"content_id": 515899393,
"root_id": 138412032,
"parent_id": 1572864000,
"content_level": 2,
"children": [
{
"content_id": 75497471,
"root_id": 138412032,
"parent_id": 515899393,
"content_level": 3,
"children": []
}
]
}
]
},
{
"content_id": 1795162113,
"root_id": 138412032,
"parent_id": 138412032,
"content_level": 1,
"children": []
}
]
}
If there is any additional information required, please let me know. I will be glad to share. Thank you.
try
-- Recursively walk the content tree starting from the given root, then
-- emit one flat JSON object per row (level-ordered, not nested).
-- NOTE(review): the JSON_OBJECT('key' VALUE expr) form is Oracle/DB2
-- syntax; Postgres would use json_build_object('key', expr) and MySQL
-- JSON_OBJECT('key', expr) — confirm the target engine before reuse.
WITH recursive cte AS (
SELECT content_id, parent_id, content_level
FROM jccontent
WHERE content_id = 138412032
UNION ALL
-- recursive step: pull in every row whose parent is already in the CTE
SELECT j.content_id, j.parent_id, j.content_level
FROM jccontent j
INNER JOIN cte c ON j.parent_id = c.content_id
)
SELECT JSON_OBJECT('id' VALUE cte.content_id, 'parent_id' VALUE cte.parent_id, 'level' VALUE cte.content_level)
FROM cte
ORDER BY cte.content_level;
I want to create a (postgres) SQL query that returns a (JSON) object with dynamic keys. Therefore I have created this example tables with some values.
-- Many-to-many schema: foods and nutrients linked through the
-- foods_nutrients junction table, which carries the per-pair amount.
CREATE TABLE foods (
id SERIAL PRIMARY KEY,
name VARCHAR(100)
);
CREATE TABLE nutrients (
id SERIAL PRIMARY KEY,
name VARCHAR(100)
);
-- Junction table; composite PK prevents duplicate (food, nutrient) pairs.
CREATE TABLE foods_nutrients (
food_id int REFERENCES foods(id) ON UPDATE CASCADE ON DELETE CASCADE,
nutrient_id int REFERENCES nutrients(id) ON UPDATE CASCADE ON DELETE CASCADE,
amount DECIMAL NOT NULL,
CONSTRAINT food_nutrient_pk PRIMARY KEY (food_id, nutrient_id)
);
-- Fixture data: Apple (id 1) has three nutrient rows; Banana (id 2) has
-- only two (no Fat row) — useful for testing partial nutrient objects.
INSERT INTO foods(name)
VALUES ('Apple'),
('Banana');
INSERT INTO nutrients(name)
VALUES ('Carbohydrates'),
('Protein'),
('Fat');
INSERT INTO foods_nutrients(food_id, nutrient_id, amount)
VALUES (1, 1, 14.0),
(1, 2, 0.3),
(1, 3, 0.2),
(2, 1, 23.7),
(2, 2, 1.1);
The result of the query should look like this JSON if possible. Or at least close enough. The main problem is the object with the IDs of the nutrients as keys, I think.
[
{
"id": 1,
"name": "Apple",
"nutrients": {
"1": 14.0,
"2": 0.3,
"3": 0.2
}
},
{
"id": 2,
"name": "Banana",
"nutrients": {
"1": 23.7,
"2": 1.1
}
}
]
Principally, the JSON_BUILD_OBJECT() function is needed to combine the id, name, and nutrients values. But nutrients requires one more step — JSON_OBJECT_AGG() — in order to build the nested object keyed by nutrient id. So, consider using
-- Build one row per food with its nutrient amounts folded into a single
-- JSON object keyed by nutrient_id, then aggregate all foods into one array.
WITH food_json AS
(
    SELECT fn.food_id,
           f.name,
           -- JSON_OBJECT_AGG turns the group's (key, value) pairs into one
           -- object, e.g. {"1": 14.0, "2": 0.3, "3": 0.2}
           JSON_OBJECT_AGG( fn.nutrient_id, fn.amount ) AS nutrients
    FROM foods_nutrients fn
    JOIN foods f
      ON fn.food_id = f.id
    -- NOTE: the original also joined nutrients, but none of its columns were
    -- used and the FK on nutrient_id already guarantees a match, so that
    -- join was redundant and has been dropped.
    GROUP BY fn.food_id, f.name
)
SELECT JSON_AGG(
           JSON_BUILD_OBJECT( 'id',        food_id,
                              'name',      name,
                              'nutrients', nutrients )
       ) AS js
FROM food_json
Demo
By the way, using the JSONB equivalents of those functions together with JSONB_PRETTY(), which pretty-prints the current result, yields nicely formatted output — just like the format shown in the question:
[
{
"id": 1,
"name": "Apple",
"nutrients": {
"1": 14.0,
"2": 0.3,
"3": 0.2
}
},
{
"id": 2,
"name": "Banana",
"nutrients": {
"1": 23.7,
"2": 1.1
}
}
]
You can try the below - DEMO Here
-- One JSON row per food; nutrients are merged into a single object keyed
-- by nutrient_id, matching the output shape requested in the question.
select row_to_json(fv)
from (
    select food_id,
           f.name,
           -- json_object_agg merges the (nutrient_id, amount) pairs into ONE
           -- object; the original json_agg(json_build_object(...)) produced
           -- an ARRAY of single-key objects instead, e.g. [{"1":14.0},...].
           json_object_agg(nutrient_id, amount) as nutrients
    from foods_nutrients fn
    join foods f on fn.food_id = f.id
    join nutrients n on fn.nutrient_id = n.id
    group by food_id, f.name
) fv
One of my column is jsonb and have value in the format. The value of a single row of column is below.
{
"835": {
"cost": 0,
"name": "FACEBOOK_FB1_6JAN2020",
"email": "test.user#silverpush.co",
"views": 0,
"clicks": 0,
"impressions": 0,
"campaign_state": "paused",
"processed":"in_progress",
"modes":["obj1","obj2"]
},
"876": {
"cost": 0,
"name": "MARVEL_BLACK_WIDOW_4DEC2019",
"email": "test.user#silverpush.co",
"views": 0,
"clicks": 0,
"impressions": 0,
"campaign_state": "paused",
"processed":"in_progress",
"modes":["obj1","obj2"]
}
}
I want to update the inner keys "processed" and "modes" under campaign id "876" in the campaign_info (jsonb) column.
I have tried this query:
-- NOTE(review): this statement is invalid. In Postgres, '->' is a
-- read-only jsonb accessor — it cannot appear on the left-hand side of
-- '=' in a SET clause — and '-->' is not an operator at all (the '--'
-- even starts a line comment here). Use jsonb_set() instead.
update safe_vid_info
set campaign_info -> '835' --> 'processed'='completed'
where cid = 'kiywgh';
But it didn't work.
Any help is appreciated. Thanks.
Is this what you want?
jsonb_set(campaign_info, '{876,processed}', '"completed"')
This updates the value at path "876" > "processed" with value 'completed'.
In your update query:
-- Replace the whole jsonb value with a copy whose path {876,processed}
-- is set to the JSON string "completed".  To also change "modes" in the
-- same statement, nest the calls:
--   jsonb_set(jsonb_set(campaign_info, '{876,processed}', '"completed"'),
--             '{876,modes}', '["new_mode"]'::jsonb)
update safe_vid_info
set campaign_info = jsonb_set(campaign_info, '{876,processed}', '"completed"')
where cid = 'kiywgh';
Here is my first table question.
-- Quiz questions; each question owns many rows in "option" (FK there).
CREATE TABLE "question" (
"question_id" serial NOT NULL,
"question" TEXT NOT NULL UNIQUE,
-- NOTE(review): plain TIMESTAMP carries no time zone; consider timestamptz
"added_at" TIMESTAMP NOT NULL,
"question_marks" integer NOT NULL,
CONSTRAINT "question_pk" PRIMARY KEY ("question_id"))
Questions have many options, so I referring every option row with question_id
-- Answer options; option_question_id links each row to its question, and
-- ON DELETE CASCADE removes options when their question is deleted.
CREATE TABLE "option" (
"option_id" serial NOT NULL,
"option" TEXT NOT NULL,
"option_question_id" integer NOT NULL,
"option_correct" BOOLEAN NOT NULL,
CONSTRAINT "option_pk" PRIMARY KEY ("option_id"))
ALTER TABLE "option" ADD CONSTRAINT "option_fk1" FOREIGN KEY ("option_question_id") REFERENCES "question"("question_id") ON DELETE CASCADE;
Now, How can I update both tables in one query?
I am building an API. The output given below is for one request; the request responds with the question details and the options for that question.
I am able to update question but questions have many options, How can I update options?
"questionDetails": [
{
"question_id": 30,
"question": "What is gravity of Saturn?",
"added_at": "2020-02-20T18:30:00.000Z",
"question_marks": 1
}
],
"options": [
{
"option_id": 19,
"option": "20",
"option_question_id": 30,
"option_correct": true
},
{
"option_id": 20,
"option": "30",
"option_question_id": 30,
"option_correct": false
},
{
"option_id": 21,
"option": "40",
"option_question_id": 30,
"option_correct": false
},
{
"option_id": 18,
"option": "400000000",
"option_question_id": 30,
"option_correct": false
}
]
}
Now Can I update this relation?
You can chain multiple operations together in a single query by using CTEs that have returning clauses.
-- Update the parent row and, within the same statement, update every
-- child row that points at it.  The RETURNING clause exposes the
-- parent's key to the outer UPDATE through the CTE.
WITH updated_parent AS (
    UPDATE my_schema.parent_table
    SET col_1 = 'a',
        col_2 = 'b'
    WHERE id_col = 3
    RETURNING id_col
)
UPDATE my_schema.child_table
SET col_1 = 'c'
WHERE parent_id = (SELECT id_col FROM updated_parent)
The same thing can be done for insert and delete statements.
Note that in PostgreSQL, data-modifying statements in WITH are executed exactly once whether or not the primary query reads their output; selecting from the CTE, as done above, is simply how the child update obtains the parent's returned key.
I am trying to build my web app that will store the data on a PostgreSQL database server running on some location on Earth and will have users connecting from other locations, so probably different timezones and offsets than my servers.
I need to show the dates and times of the actions like, posts created, posts edited, comments submitted, etc. according to the each connecting user. This is just like the StackExchange. However I am running into problems of timezones and offsets as described as follows:
Everything seems working correct in my pgAdmin3 SQL Editor. When I write the query below in pgAdmin3 SQL Editor with set local time zone 'Europe/Oslo', for example, I get both the posts and tags table created_at fields correct with +2 offset in the output. In the output row, the created_at field of posts table is 2016-08-29 19:15:53.758+02 and for same row created_at for tags table is 2016-08-29T19:15:53.758+02:00.
However, when I put it in a route function in my Nodejs Express.js server with pg-promise as the connection lib, I only get the tags table created_at field correct with the time in Oslo with timezone offset appended as expected, I get created_at field of the posts table in UTC not as expected.
All timestamps are defined as timestamp(3) with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP as shown below. Also, without the setting set local time zone, I get the same behaviour, for first table I get UTC time, for the latter I get timestamp with offset of server appended.
Does not the set local time zone directive bind all the query? What is the missing point in my approach?
an example query I use:
-- List all questions with author info and their tags aggregated into a
-- JSON array.  The count(t.*) = 0 guard substitutes '[]' for tagless
-- posts, because json_agg over the NULL rows of the outer join would
-- otherwise yield [null].
-- Fix: the original had "then '[]' e json_agg(t.*)" — the stray "e" was
-- a garbled "else" (the working copy of this query further below in the
-- same document confirms the intended form).
select
    q.*, -- created_at timestamp (with time zone) is one of those columns
    u.name as author,
    u.reputation,
    case when count(t.*) = 0 then '[]' else json_agg(t.*) end as tags
from posts q
-- authors
join users u
    on q.author_id = u.id
-- tags
left join post_has_tag p_h_t
    on q.id = p_h_t.post_id
left join tags t
    on p_h_t.tag_id = t.id
where q.post_type = 'question'
group by q.id, u.id;
An example express.js route function:
// GET /x — list all questions with author info and JSON-aggregated tags.
//
// NOTE(review): the differing timestamp representations in the response
// are presumably because posts.created_at comes back as a JS Date
// (serialized to UTC by res.json), while the tags inside json_agg are
// stringified by Postgres itself using the session time zone — confirm
// against the pg driver's type parsers.
//
// Fixes vs. the original: the .catch() only logged (naming the wrong
// route, "/login", with a "quesry" typo) and never answered the client,
// leaving the HTTP request hanging; it now returns a 500 response.
trialRoutes.get('/x', function (req, res) {
db.query(
`
--begin;
SET LOCAL TIME ZONE 'Europe/Oslo';
SELECT
q.*, -- created_at timestamp (with time zone) is already in here
u.name AS author,
u.reputation,
CASE WHEN count(t.*)=0 THEN '[]' ELSE json_agg(t.*) END as tags
FROM posts q
-- authors
JOIN users u
ON q.author_id = u.id
-- tags
left join post_has_tag p_h_t
on q.id = p_h_t.post_id
left join tags t
on p_h_t.tag_id = t.id
WHERE q.post_type = 'question'
group by q.id, u.id;
--commit;
`
)
.then(function (data) {
res.json(data);
})
.catch(function (error) {
console.log("/x, database query error.", error);
// Answer the client so the request does not hang forever.
res.status(500).json({ error: "database query error" });
});
})
The result I get from Express.js http server with pg-promise. Note the different timestamps that actually should point same point in UTC, which is correctly done, and representation which is not correctly done:
[
{
"id": "7",
"created_at": "2016-08-29T21:02:04.153Z", // same point in time, different representation
"title": "AAAAAAAAAAA",
"text": "aaaaa aaaaaaa aaaaaa",
"post_url": "AAAAAAAAAAA",
"score": 0,
"author_id": 1,
"parent_post_id": null,
"post_type": "question",
"is_accepted": false,
"acceptor_id": null,
"timezone": "2016-08-29T20:02:04.153Z",
"author": "Faruk",
"reputation": 0,
"tags": [
{
"id": 4,
"created_at": "2016-08-29T23:02:04.153+02:00", // same point in time, different representation
"label": "physics",
"description": null,
"category": null
}
]
},
{
"id": "6",
"created_at": "2016-08-29T17:24:10.151Z",
"title": "Ignoring timezones altogether in Rails and PostgreSQL",
"text": "Ignoring timezones altogether in Rails and PostgreSQL",
"post_url": "Ignoring-timezones-altogether-in-Rails-and-PostgreSQL",
"score": 0,
"author_id": 2,
"parent_post_id": null,
"post_type": "question",
"is_accepted": false,
"acceptor_id": null,
"timezone": "2016-08-29T16:24:10.151Z",
"author": "Selçuk",
"reputation": 0,
"tags": [
{
"id": 3,
"created_at": "2016-08-29T19:24:10.151+02:00",
"label": "sql",
"description": null,
"category": null
}
]
}
]
The definition of the posts and tags tables used here:
-- questions and answers (both stored here; answers reference their
-- question via parent_post_id, discriminated by post_type)
CREATE TABLE posts
(
id bigserial PRIMARY KEY,
-- timestamptz with millisecond precision; Postgres stores it as a UTC instant
created_at timestamp(3) with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
title character varying(100),
text text,
post_url character varying(100),
score integer DEFAULT 0,
author_id integer NOT NULL REFERENCES users (id),
-- self-reference: set for answers, NULL for questions
parent_post_id integer REFERENCES posts (id),
post_type varchar(30),
is_accepted boolean DEFAULT FALSE,
acceptor_id integer REFERENCES users (id) DEFAULT NULL
--seen_by_parent_post_author boolean DEFAULT false
--view_count
--accepted_answer_id
--answer_count
);
-- Tag vocabulary for posts (linked via the post_has_tag junction table).
CREATE TABLE tags
(
    id          bigserial PRIMARY KEY,
    -- millisecond-precision creation instant, stored as UTC
    created_at  timestamp(3) with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
    label       varchar(30) NOT NULL,
    description varchar(200),
    category    varchar(50)
);