Using jsonb_agg/jsonb_build_object to parse to inner structs - sql

Whenever I try to get (select/scan) the Groups (outer struct) along with their Collaborators (inner structs), I'm getting the following error:
sql: Scan error on column index ..., name "collaborators": unsupported Scan, storing driver.Value type []uint8 into type *[]User
I'm using sqlx (with the pgx driver).
The code to fetch from the DB is:
func (psql *Postgres) GetGroups(someParam string) ([]Group, error) {
	groups := []Group{}
	err := psql.db.Unsafe().Select(&groups, <the query ...>, someParam)
	....
}

type Postgres struct {
	db      *sqlx.DB
	config  *config.PostgresDB
	timeout time.Duration
}
This is the SQL query:
SELECT groups.id,
       groups.title,
       JSONB_AGG(JSONB_BUILD_OBJECT(
           'id', u.id,
           'first_name', u.first_name,
           'last_name', u.last_name,
           'user_pic_url', u.user_pic_url)) as collaborators
FROM groups
JOIN user_group_permissions p
    ON p.group_id = groups.id
JOIN users u
    ON u.id = p.user_id
GROUP BY groups.id, groups.title
These are the structs:
type Group struct {
	Id            string `json:"id" db:"id"`
	Title         string `json:"title" db:"title"`
	Collaborators []User `json:"collaborators" db:"collaborators"`
}

type User struct {
	Id           string  `json:"id" db:"id"`
	FirstName    string  `json:"first_name" db:"first_name"`
	LastName     string  `json:"last_name" db:"last_name"`
	ProfilePhoto *string `json:"profile_photo" db:"user_pic_url"`
}
I have a simple Group table, a User table, and a table which represents all users with permissions to the group:
CREATE TABLE groups (
	id int UNIQUE NOT NULL generated always as identity,
	title text
);

CREATE TABLE users (
	id bigint UNIQUE NOT NULL generated always as identity,
	first_name text NOT NULL,
	last_name text NOT NULL,
	user_pic_url text
);

CREATE TABLE user_group_permissions (
	group_id unsigned_int,
	user_id unsigned_bigint,
	permission unsigned_smallint
);

CREATE DOMAIN unsigned_smallint AS smallint
	CHECK(VALUE >= 0 AND VALUE < 32767);

CREATE DOMAIN unsigned_int AS int
	CHECK(VALUE >= 0 AND VALUE < 2147483647);

CREATE DOMAIN unsigned_bigint AS bigint
	CHECK(VALUE >= 0 AND VALUE < 9223372036854775807);

Declare a named slice type for the collaborators and implement sql.Scanner on it; the jsonb aggregate arrives as []byte (or string), which can then be unmarshalled with encoding/json:

import (
	"encoding/json"
	"fmt"
)

type Group struct {
	Id            string   `json:"id" db:"id"`
	Title         string   `json:"title" db:"title"`
	Collaborators UserList `json:"collaborators" db:"collaborators"`
}

type User struct {
	Id           string  `json:"id" db:"id"`
	FirstName    string  `json:"first_name" db:"first_name"`
	LastName     string  `json:"last_name" db:"last_name"`
	ProfilePhoto *string `json:"profile_photo" db:"user_pic_url"`
}

type UserList []User

func (list *UserList) Scan(src interface{}) error {
	var data []byte
	switch v := src.(type) {
	case []byte:
		data = v
	case string:
		data = []byte(v)
	default:
		return fmt.Errorf("unsupported Scan source: %T", src)
	}
	return json.Unmarshal(data, list)
}
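If more aggregates like this need scanning, the same logic can be factored into a reusable scanner with a type parameter. This is only a sketch, not part of sqlx or pgx, and the JSONSlice name is mine; it assumes Go 1.18+ generics:

import (
	"encoding/json"
	"fmt"
)

// JSONSlice scans a jsonb_agg(...) column into a slice of T.
type JSONSlice[T any] []T

func (s *JSONSlice[T]) Scan(src interface{}) error {
	switch v := src.(type) {
	case nil:
		*s = nil
		return nil
	case []byte:
		return json.Unmarshal(v, s)
	case string:
		return json.Unmarshal([]byte(v), s)
	default:
		return fmt.Errorf("unsupported Scan source: %T", src)
	}
}

Declaring Collaborators as JSONSlice[User] (with the same json/db tags) would then scan the same way as UserList above.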

Related

Scan a PostgreSQL field (of ARRAY type) into a slice of Go structs

Let's say I have:
type User struct {
	ID    int64  `json:"id"`
	Posts []Post `json:"posts"`
}

type Post struct {
	ID   int64  `json:"id"`
	Text string `json:"text"`
}
The SQL query:
WITH temp AS (
    SELECT u.id AS user_id, p.id AS post_id, p.text AS post_text
    FROM users u
    JOIN posts p ON u.id = p.user_id
)
SELECT user_id, ARRAY_AGG(ARRAY[post_id::text, post_text])
FROM temp
GROUP BY user_id
What I want is to scan rows from the query above into a slice of User objects:
import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v4/pgxpool"
	"github.com/lib/pq"
)

var out []User
rows, _ := client.Query(context.Background(), query) // No error handling for brevity
for rows.Next() {
	var u User
	if err := rows.Scan(&u.ID, pq.Array(&u.Posts)); err != nil {
		return
	}
	out = append(out, u)
}
Pretty much expectedly, the code above fails with:
pq: cannot convert ARRAY[4][2] to StringArray
This makes sense, but is there a way to read the SQL output into my slice of users?
Scanning of multi-dimensional arrays of arbitrary types, like structs, is not supported by lib/pq. If you want to scan such an array you'll have to parse and decode it yourself in a custom sql.Scanner implementation.
For example:
type PostList []Post

func (ls *PostList) Scan(src any) error {
	var data []byte
	switch v := src.(type) {
	case string:
		data = []byte(v)
	case []byte:
		data = v
	}
	// The data var holds the multi-dimensional array value,
	// something like: {{"1","foo"}, {"2","bar"}, ...}
	// The above example is easy to parse but too simplistic,
	// the array is likely to be more complex and therefore
	// harder to parse, but not at all impossible if that's
	// what you want.
	return nil
}
If you want to learn more about the PostgreSQL array representation syntax, see:
Array Input and Output Syntax
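For the simplistic shape shown in the comment above, a naive split-based decode fleshing out that stub is enough to illustrate the idea. This is only a sketch under strong assumptions: exactly two text elements per inner array and no embedded commas, braces, quotes or NULLs, which real data may well contain:

import (
	"fmt"
	"strconv"
	"strings"
)

func (ls *PostList) Scan(src any) error {
	var data string
	switch v := src.(type) {
	case string:
		data = v
	case []byte:
		data = string(v)
	default:
		return fmt.Errorf("unsupported Scan source: %T", src)
	}
	// e.g. data == `{{"1","foo"},{"2","bar"}}`
	data = strings.TrimPrefix(data, "{")
	data = strings.TrimSuffix(data, "}")
	if data == "" {
		return nil
	}
	for _, elem := range strings.Split(data, "},{") {
		parts := strings.Split(strings.Trim(elem, "{}"), ",")
		if len(parts) != 2 {
			return fmt.Errorf("unexpected array element: %q", elem)
		}
		id, err := strconv.ParseInt(strings.Trim(parts[0], `"`), 10, 64)
		if err != nil {
			return err
		}
		*ls = append(*ls, Post{ID: id, Text: strings.Trim(parts[1], `"`)})
	}
	return nil
}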
An approach that does not require you to implement a parser for PostgreSQL arrays would be to build and pass JSON objects, instead of PostgreSQL arrays, to array_agg. The result of that would be a one-dimensional array with jsonb as the element type.
SELECT user_id, array_agg(jsonb_build_object('id', post_id, 'text', post_text))
FROM temp
GROUP BY user_id
Then the implementation of the custom sql.Scanner just needs to delegate to lib/pq.GenericArray, and another, element-specific sql.Scanner would delegate to encoding/json.
type PostList []Post

func (ls *PostList) Scan(src any) error {
	return pq.GenericArray{ls}.Scan(src)
}

func (p *Post) Scan(src any) error {
	var data []byte
	switch v := src.(type) {
	case string:
		data = []byte(v)
	case []byte:
		data = v
	}
	return json.Unmarshal(data, p)
}

type User struct {
	ID    int64    `json:"id"`
	Posts PostList `json:"posts"`
}

golang unmarshal using db tags

I have an SQL query which fetches a list of cities nested inside provinces nested inside countries:
SELECT
    C.*,
    P.provinces
FROM
    countries AS C
    LEFT JOIN (
        SELECT
            P.country_id,
            json_agg(json_build_object(
                'id', P.id,
                'name', P.name,
                'slug', P.slug,
                'cities', Ci.cities
            )) AS provinces
        FROM
            provinces AS P
            LEFT JOIN (
                SELECT
                    Ci.province_id,
                    json_agg(json_build_object(
                        'id', Ci.id,
                        'name', Ci.name,
                        'slug', Ci.slug
                    )) AS cities
                FROM
                    cities AS Ci
                GROUP BY Ci.province_id
            ) AS Ci ON Ci.province_id = P.id
        GROUP BY P.country_id
    ) AS P ON P.country_id = C.id
I am fetching this data into a slice of countries:
type Country struct {
	Id         int64         `json:"id" db:"id"`
	ISOCode2   string        `json:"isoCode2" db:"iso_code_2"`
	ISOCode3   string        `json:"isoCode3" db:"iso_code_3"`
	ISONumCode string        `json:"isoNumCode" db:"iso_num_code"`
	Name       string        `json:"name" db:"name"`
	Slug       string        `json:"slug" db:"slug"`
	Provinces  SliceProvince `json:"provinces" db:"provinces"`
}

type SliceProvince []Province

func (provinces *SliceProvince) Scan(src any) (err error) {
	if src == nil {
		return
	}
	var source []byte
	switch src := src.(type) {
	case []byte:
		source = src
	case string:
		source = []byte(src)
	default:
		return fmt.Errorf("unsupported type in scan:%v", src)
	}
	err = json.Unmarshal(source, provinces)
	return
}

type Province struct {
	Id     int64     `json:"id" db:"id"`
	Name   string    `json:"name" db:"name"`
	Slug   string    `json:"slug" db:"slug"`
	Cities SliceCity `json:"cities" db:"cities"`
}

type SliceCity []City

func (cities *SliceCity) Scan(src any) (err error) {
	if src == nil {
		return
	}
	var source []byte
	switch src := src.(type) {
	case []byte:
		source = src
	case string:
		source = []byte(src)
	default:
		return fmt.Errorf("unsupported type in scan")
	}
	err = json.Unmarshal(source, cities)
	return
}

type City struct {
	Id   int64  `json:"id" db:"id"`
	Name string `json:"name" db:"name"`
	Slug string `json:"slug" db:"slug"`
}
My main question: in the Scan methods for these models, I want to unmarshal using the db tags instead of the json tags. Is there any workaround for this?
I came up with the idea of unmarshalling into a map, changing the keys from the db-tag names to the json-tag names, then marshalling and unmarshalling into the corresponding struct. But when nested models or slices are involved, that increases the complexity.
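One way to keep that map-based workaround manageable is to factor the key remapping into a reflection-driven helper. The sketch below is mine (the helper names are not from any library); it remaps only one level of keys, so nested aggregates such as the cities inside each province would need the same treatment applied recursively:

import (
	"encoding/json"
	"reflect"
)

// dbToJSONKeys builds a db-tag -> json-tag lookup from a struct type's tags.
// Tag options such as ",omitempty" are not handled in this sketch.
func dbToJSONKeys(t reflect.Type) map[string]string {
	m := make(map[string]string, t.NumField())
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if db, js := f.Tag.Get("db"), f.Tag.Get("json"); db != "" && js != "" {
			m[db] = js
		}
	}
	return m
}

// unmarshalSliceByDBTag decodes a JSON array of objects keyed by db tags
// into dst, which must be a pointer to a slice of structs, by renaming
// the top-level keys to the corresponding json tags first.
func unmarshalSliceByDBTag(data []byte, dst any) error {
	var rows []map[string]json.RawMessage
	if err := json.Unmarshal(data, &rows); err != nil {
		return err
	}
	keys := dbToJSONKeys(reflect.TypeOf(dst).Elem().Elem()) // *[]T -> T
	for i, row := range rows {
		remapped := make(map[string]json.RawMessage, len(row))
		for k, v := range row {
			if js, ok := keys[k]; ok {
				remapped[js] = v
			} else {
				remapped[k] = v
			}
		}
		rows[i] = remapped
	}
	buf, err := json.Marshal(rows)
	if err != nil {
		return err
	}
	return json.Unmarshal(buf, dst)
}

The Scan method of SliceProvince could then delegate to unmarshalSliceByDBTag(source, provinces) instead of calling json.Unmarshal directly.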

Select sum with joining table on gorm golang

Let's assume I have 3 models like this:
type Violation struct {
	ID             uint `gorm:"primaryKey"`
	ViolationName  string
	ViolationPoint int
}

type Student struct {
	ID          uint `gorm:"primaryKey"`
	StudentName string
}

type ViolationRecord struct {
	ID          uint `gorm:"primaryKey"`
	ViolationID int
	V           Violation `gorm:"constraint:OnUpdate:CASCADE,OnDelete:SET NULL;foreignKey:ViolationID;references:ID"`
	StudentID   int
	S           Student `gorm:"constraint:OnUpdate:CASCADE,OnDelete:SET NULL;foreignKey:StudentID;references:ID"`
	RecordDate  time.Time
}
Everything is fine for the CRUD part, but the new request is to show the total of violation records per student and the sum of the violation points.
I have tried this:
type ViolationSummary struct {
	count int
	sum   int
}

func (s *Server) ShowSummary(id int) {
	var vs ViolationSummary
	s.DB.Table("violation_records").
		Select("count(violation_records.id) as count, sum(violations.violation_point) as sum").
		Joins("left join violations on violation_records.violation_id=violations.id").
		Where("student_id = ?", id).
		Scan(&vs)
	fmt.Println(vs.count)
	fmt.Println(vs.sum)
}
but this function always shows 0 values.
I have solved my problem with this code; I hope it can help:
func (s *Server) ShowSummary(id int) {
	vr := []models.ViolationRecord{}
	var total int64 = 0
	var point int64 = 0
	err := s.DB.Where("student_id = ?", id).Preload(clause.Associations).Find(&vr).Error
	if err != nil {
		fmt.Println("Error on load data")
	}
	for _, element := range vr {
		total += 1
		point += int64(element.V.ViolationPoint)
	}
	fmt.Println(total)
	fmt.Println(point)
}
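For what it's worth, the original aggregate query most likely scanned zeros because count and sum in ViolationSummary are unexported, so GORM's Scan cannot set them via reflection. A sketch with exported fields (otherwise the same query) keeps the counting and summing in SQL:

type ViolationSummary struct {
	Count int64
	Sum   int64
}

func (s *Server) ShowSummary(id int) {
	var vs ViolationSummary
	s.DB.Table("violation_records").
		Select("count(violation_records.id) as count, sum(violations.violation_point) as sum").
		Joins("left join violations on violation_records.violation_id = violations.id").
		Where("student_id = ?", id).
		Scan(&vs)
	fmt.Println(vs.Count)
	fmt.Println(vs.Sum)
}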

Remove item of record without knowing all item

I have this simulation:
init : (Model, Cmd Msg)
init = ({ dog = List Dog }, Cmd.none)
type alias Dog =
{ name : String
, age : Int
, price : Float
, extra : List Extra
}
type alias Extra =
{ allergies : List String
, wishes : List String
}
[{ name = "Hot"
, age = 1
, price = 300.5
, extra = [{...}]
},
{ name = "Dog"
, age = 3
, price = 150.0
, extra = [{...}]
}]
And I want to remove only the 'extra' field of the dogs, at a certain point in the code:
[{ name = "Hot"
, age = 1
, price = 300.5
},
{ name = "Dog"
, age = 3
, price = 150.0
}]
I can do this by mapping over the entire list and generating a new one without the 'extra' field:
removeExtraOfDogs dogList =
(dogList |> List.map (\dog ->
{ name = dog.name
, age = dog.age
, price = dog.price
}
))
but I want to make it dynamic, just passing the field to remove, without having to know which other fields the type has and recreating it.
Elm used to have this feature but it was removed a while ago. But, based on your use case described in a comment, I don't think you need this feature. You can instead use extensible record feature of Elm to allow passing different records into a function as long as they contain a fixed set of fields.
For example, let's say you have two types with name and age fields, each with an extra, incompatible field:
type alias Foo = { name : String, age : Int, extra : List String }
type alias Bar = { name : String, age : Int, extra : Int }
You can define a function that takes a record with a name field of type String, an age field of type Int, and any extra fields:
encode : { r | name : String, age : Int } -> String
encode record = record.name ++ "," ++ toString record.age
You can now pass both Foo and Bar to this function because they both satisfy the requirements of the type signature.

Extracting order type from sql parser

I am using the global sql parser (gsp) to extract the columns and sort type from the ORDER BY of an SQL query, and to extract AND/OR from the WHERE condition:
SELECT employee_id, dept, name, age, salary
FROM employee_info
WHERE dept = 'Sales' and ID=1
ORDER BY salary, age DESC,ID;
I can extract the column name but cannot extract the order type.
1. How can I extract the order type?
2. How can I extract AND/OR from the WHERE clause?
If pSqlstmt is gsp_selectStatement * then you can do something like this:
if (pSqlstmt->orderbyClause != nullptr)
{
    string sortType;
    int colNumOrderBy = pSqlstmt->orderbyClause->items->length;
    auto *cell = pSqlstmt->orderbyClause->items->head;
    for (int i = 0; i < colNumOrderBy; i++)
    {
        gsp_orderByItem *field = reinterpret_cast<gsp_orderByItem *>(gsp_list_celldata(cell));
        // get the ORDER BY column name
        char *sortCol = gsp_node_text(reinterpret_cast<gsp_node *>(field->sortKey));
        if (field->sortToken == nullptr)
        {
            // no explicit ASC/DESC token: the default ordering (ASC) applies
        }
        else
        {
            // find out the sorting type (ASC/DESC) from the token's raw text
            // (assuming the token exposes its text pointer alongside nStrLen)
            sortType.assign(field->sortToken->pStr, field->sortToken->nStrLen);
        }
        free(sortCol);
        // advance to the next ORDER BY item instead of mutating the list head
        cell = cell->nextCell;
    }
}