How can I fix "operator does not exist: text = uuid" when using Haskell's postgres-simple library to perform a multi-row insert? - sql

I am using the postgresql-simple library to insert into the eligible_class_passes table, which is essentially a join table representing a many-to-many relationship.
I am using the executeMany function from postgresql-simple to do a multi-row insert.
-- | Insert eligibility rows linking one class type to many class-pass types,
-- restricted to rows whose tenant matches on both sides of the join.
-- Runs inside a transaction; returns the number of rows inserted.
--
-- NOTE(review): executeMany is given `simpleConn`, but the function's
-- parameter is `conn` — `simpleConn` looks undefined in this snippet;
-- presumably it should be `conn`. TODO confirm against the full module.
-- NOTE(review): the SQL joins `subscription_types`, while the schema's FK on
-- eligible_class_passes references `class_pass_types` — verify which table
-- is intended.
updateEligibleClassPasses :: Connection -> Text -> Text -> [Text] -> IO Int64
updateEligibleClassPasses conn tenantId classTypeId classPassTypeIds =
withTransaction conn $ do
executeMany
simpleConn
[sql|
INSERT INTO eligible_class_passes (class_type_id, class_pass_type_id)
SELECT upd.class_type_id::uuid, upd.class_pass_type_id::uuid
FROM (VALUES (?, ?, ?)) as upd(class_type_id, class_pass_type_id, tenant_id)
INNER JOIN class_types AS ct
ON upd.class_type_id::uuid = ct.id
INNER JOIN subscription_types AS st
ON class_pass_type_id::uuid = st.id
WHERE ct.tenant_id = upd.tenant_id::uuid AND st.tenant_id = upd.tenant_id::uuid
|]
params
where
-- Pair each class-pass-type id with the fixed class type and tenant,
-- producing one parameter tuple per row for executeMany.
addParams classPassTypeId = (classTypeId, classPassTypeId, tenantId)
params = addParams <$> classPassTypeIds
When this function is executed with the correct parameters applied I get the following runtime error
SqlError {sqlState = "42883", sqlExecStatus = FatalError, sqlErrorMsg = "operator does not exist: text = uuid", sqlErrorDetail = "", sqlErrorHint = "No operator matches the given name and argument type(s). You might need to add explicit type casts."}
However, when translated to SQL without the parameter substitutions (?) the query works correctly when executed in psql.
-- Hand-expanded version of the query above, with literal uuid strings in
-- place of the (?, ?, ?) placeholders; reported to run correctly in psql.
INSERT INTO eligible_class_passes (class_type_id, class_pass_type_id)
SELECT upd.class_type_id::uuid, upd.class_pass_type_id::uuid
FROM (VALUES ('863cb5ea-7a68-41d5-ab9f-5344605de500', 'e9195660-fd48-4fa2-9847-65a0ad323bd5', '597e6d7a-092a-49be-a2ea-11e8d85d8f82')) as upd(class_type_id, class_pass_type_id, tenant_id)
INNER JOIN class_types AS ct
-- Casts applied on the VALUES side; ct.id / st.id are already uuid.
ON upd.class_type_id::uuid = ct.id
INNER JOIN subscription_types AS st
ON class_pass_type_id::uuid = st.id
WHERE ct.tenant_id = upd.tenant_id::uuid AND st.tenant_id = upd.tenant_id::uuid;
My schema is as follows
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- Tenants are the top-level accounts; the other tables are scoped to one.
CREATE TABLE tenants (
id UUID NOT NULL DEFAULT uuid_generate_v4() PRIMARY KEY,
name text NOT NULL UNIQUE,
email text NOT NULL UNIQUE,
created_at timestamp with time zone NOT NULL default now(),
updated_at timestamp with time zone NOT NULL default now()
);

CREATE TABLE class_types (
id UUID NOT NULL DEFAULT uuid_generate_v4() PRIMARY KEY,
-- FIX: tenant_id was never declared, yet the FOREIGN KEY below references
-- it, so the original CREATE TABLE fails with
-- "column tenant_id referenced in foreign key constraint does not exist".
-- Declared here to match class_pass_types.
tenant_id UUID NOT NULL,
FOREIGN KEY (tenant_id) REFERENCES tenants (id),
created_at timestamp with time zone NOT NULL default now(),
updated_at timestamp with time zone NOT NULL default now()
);

CREATE TABLE class_pass_types (
id UUID NOT NULL DEFAULT uuid_generate_v4() PRIMARY KEY,
name TEXT NOT NULL,
tenant_id UUID NOT NULL,
price Int NOT NULL,
created_at timestamp with time zone NOT NULL default now(),
updated_at timestamp with time zone NOT NULL default now(),
FOREIGN KEY (tenant_id) REFERENCES tenants (id)
);

-- Many to many join through table.
-- Expresses class pass type redeemability against class types.
-- NOTE(review): the Haskell query above joins subscription_types, but this
-- FK points at class_pass_types — confirm which table is intended.
CREATE TABLE eligible_class_passes (
class_type_id UUID,
class_pass_type_id UUID,
created_at timestamp with time zone NOT NULL default now(),
updated_at timestamp with time zone NOT NULL default now(),
FOREIGN KEY (class_type_id) REFERENCES class_types (id) ON DELETE CASCADE,
FOREIGN KEY (class_pass_type_id) REFERENCES class_pass_types (id) ON DELETE CASCADE,
PRIMARY KEY (
class_type_id, class_pass_type_id
)
);

To help debug your issue, use the formatQuery function; then you can see what kind of final query postgresql-simple is sending to the server.
Also, I'd recommend using the UUID type from the uuid-types package, instead of Text, for the uuids. Using Text most likely hides some issues from you (which you'll hopefully see by using formatQuery).

Related

Best way to Store and Count available stock (PostgreSQL)

I am not sure how to proceed with counting the stock and storing the stock in generated invoices.
Live data: http://sqlfiddle.com/#!17/20ff4/4
Table articles, where the static product info is stored:
-- Static product catalogue; one row per article.
create table articles
(
prod_article_pvt_uuid uuid default uuid_generate_v1() not null
-- NOTE(review): constraint name "proxies_pk" does not match the table name;
-- probably copied from another table.
constraint proxies_pk
primary key,
prod_article_pub_uuid uuid default uuid_generate_v1mc(),
prod_article_pub_id serial not null,
t timestamp with time zone default CURRENT_TIMESTAMP(6),
archived boolean default false,
cat integer,
prod_brand_id integer,
barcode bigint,
supplier_code varchar
);
This table opens a document, and defines the type of document:
-- A sale document; its kind is given by sale_type_id.
-- NOTE(review): sale_type_id refers to types.sale_type_id below, but no
-- foreign key is declared.
create table documents
(
sale_doc_pvt_uuid uuid default uuid_generate_v1() not null
constraint documents_pk
primary key,
sale_doc_pub_uuid uuid default uuid_generate_v1mc(),
sale_doc_alpha varchar,
t timestamp with time zone default CURRENT_TIMESTAMP(6),
archived boolean default false,
sale_type_id integer not null,
frozen boolean default false
);
-- Document types; draft documents are excluded from stock counting.
create table types
(
sale_type_id serial not null
constraint types_pk
primary key,
name varchar,
draft boolean default false
);
Where each article is added as an item of the document:
-- Line items: quantity of one article sold on one document.
-- NOTE(review): sale_doc_pvt_uuid and prod_article_pvt_uuid reference the
-- documents/articles tables by convention, but no FK constraints are
-- declared.
create table items
(
sale_unit_pvt_uuid uuid default uuid_generate_v1() not null
constraint units_pk
primary key,
sale_unit_pub_uuid uuid default uuid_generate_v1mc(),
t timestamp with time zone default CURRENT_TIMESTAMP(6),
archived boolean default false,
sale_doc_pvt_uuid uuid not null,
prod_article_pvt_uuid uuid not null,
quantity numeric(16,6) not null
);
These are the available products in the warehouse:
-- Warehouse stock rows; quantity per article on hand.
-- NOTE(review): no primary key is declared on this table.
create table products
(
whse_prod_pvt_uuid uuid default uuid_generate_v1() not null,
whse_prod_pub_uuid uuid default uuid_generate_v1mc(),
t_created timestamp with time zone default CURRENT_TIMESTAMP(6),
prod_article_pvt_uuid uuid not null,
quantity numeric(16,6) default 1,
global_ctry_id integer
);
I am trying to query this like so:
-- Remaining stock per article: warehouse quantity minus quantity consumed by
-- items on non-archived, non-draft documents.
SELECT
(sum(products.quantity) - COALESCE(_items.quantity, 0)) as quantity
,articles.prod_article_pub_id as article
FROM products
LEFT JOIN articles ON products.prod_article_pvt_uuid = articles.prod_article_pvt_uuid
-- Consumed quantity, grouped per (article, document).
-- NOTE(review): DISTINCT ON (prod_article_pvt_uuid) on top of this GROUP BY
-- keeps only one (article, document) group per article — confirm that is
-- intended and not masking double counting.
-- NOTE(review): the WHERE conditions on documents/types effectively turn
-- those LEFT JOINs into inner joins.
LEFT JOIN (
SELECT DISTINCT ON (items.prod_article_pvt_uuid)
items.prod_article_pvt_uuid
,items.sale_doc_pvt_uuid
,sum(items.quantity) as quantity
FROM items
LEFT JOIN documents ON items.sale_doc_pvt_uuid = documents.sale_doc_pvt_uuid
LEFT JOIN types ON documents.sale_type_id = types.sale_type_id
WHERE
items.archived = false
AND documents.archived = false
AND types.draft = false
GROUP BY
items.prod_article_pvt_uuid
,items.sale_doc_pvt_uuid
) AS _items ON _items.prod_article_pvt_uuid = products.prod_article_pvt_uuid
GROUP BY
articles.prod_article_pub_id
,_items.quantity
ORDER BY
article ASC,
quantity DESC
It does give me the desired result. But I am wondering if this is the correct way of going about it, because it looks like it might struggle with millions of rows.
Basically each time I need to display the quantity available in stock I have to count every single document ever generated.
Is this a good idea?
Will it be a performance bottle neck with millions of rows?

Left join returns duplicate rows

I am just learning SQL and I'm really struggling to understand why my left join is returning duplicate rows. This is the query I'm using:
-- Original query: a join emits one output row per matching hidden comment,
-- so a post with several hidden comments appears several times.
SELECT "id", "title"
FROM "posts"
LEFT JOIN "comments" "comment"
ON "comment"."post_id"="id" AND ("comment"."status" = 'hidden')
It returns 4 rows, but should only return 3. Two of the returned rows are duplicates (same values). I can fix this by using the DISTINCT prefix on "id".
-- Deduplicated variant: DISTINCT collapses the repeated (id, title) rows.
SELECT DISTINCT "id", "title"
FROM "posts"
LEFT JOIN "comments" "comment"
ON "comment"."post_id"="id" AND ("comment"."status" = 'hidden')
The query returns 3 rows and I get desired result. But I'm still wondering why in the world I would get a duplicate row from the first query in the first place? I'm trying to write an aggregation query and this seems to be the issue I'm having.
I'm using PostgreSQL.
More specific: (as created by my ORM)
Shift DDL
-- ORM-generated shift table; camelCase column names require double quoting.
CREATE TABLE shift (
id uuid DEFAULT uuid_generate_v4() PRIMARY KEY,
"gigId" uuid REFERENCES gig(id) ON DELETE CASCADE,
"categoryId" uuid REFERENCES category(id),
notes text,
"createdAt" timestamp without time zone NOT NULL DEFAULT now(),
"updatedAt" timestamp without time zone NOT NULL DEFAULT now(),
"salaryFixed" numeric,
"salaryHourly" numeric,
"salaryCurrency" character varying(3) DEFAULT 'SEK'::character varying,
"staffingMethod" character varying(255) NOT NULL DEFAULT 'auto'::character varying,
"staffingIspublished" boolean NOT NULL DEFAULT false,
"staffingActivateon" timestamp with time zone,
"staffingTarget" integer NOT NULL DEFAULT 0
);
ShiftEmployee DDL
-- Join table: which employees are assigned to which shifts, with status.
CREATE TABLE "shiftEmployee" (
"employeeId" uuid REFERENCES employee(id) ON DELETE CASCADE,
"shiftId" uuid REFERENCES shift(id) ON DELETE CASCADE,
status character varying(255) NOT NULL,
"updatedAt" timestamp without time zone NOT NULL DEFAULT now(),
"salaryFixed" numeric,
"salaryHourly" numeric,
"salaryCurrency" character varying(3) DEFAULT 'SEK'::character varying,
CONSTRAINT "PK_6acfd2e8f947cee5a62ebff08a5" PRIMARY KEY ("employeeId", "shiftId")
);
Query
SELECT "id", "staffingTarget" FROM "shift" LEFT JOIN "shiftEmployee" "se" ON "se"."shiftId"="id" AND ("se"."status" = 'confirmed');
Result
id staffingTarget
68bb0892-9bce-4d08-b40e-757cb0889e87 3
12d88ff7-9144-469f-8de5-3e316c4b3bbd 6
73c65656-e028-4f97-b855-43b00f953c7b 5
68bb0892-9bce-4d08-b40e-757cb0889e88 3
e3279b37-2ba5-4f1d-b896-70085f2ba345 4
e3279b37-2ba5-4f1d-b896-70085f2ba346 5
e3279b37-2ba5-4f1d-b896-70085f2ba346 5
789bd2fb-3915-4cda-a3d7-2186cf5bb01a 3
If a post has more than one hidden comment, you will see that post multiple times because a join returns one row for each match - that's the nature of a join. And an outer join doesn't behave differently.
If your intention is to list only posts with hidden comments, it's better to use an EXISTS query instead:
-- Posts that have at least one hidden comment; EXISTS tests membership
-- without multiplying rows the way a join would.
SELECT post.id, post.title
FROM posts AS post
WHERE EXISTS (
    SELECT 1
    FROM comments AS hidden_comment
    WHERE hidden_comment.post_id = post.id
      AND hidden_comment.status = 'hidden'
);

operator is not unique. Could not choose a best candidate operator. You might need to add explicit type casts

I have this query that just selects the difference between two dates:
select date_part('day', 'toDate' - 'fromDate') from "Reservation" where "reservationID" = 1;
but this is giving me this error:
operator is not unique. Could not choose a best candidate operator.
You might need to add explicit type casts.
How can I fix this?
P.S. This is how I created the "Reservation" table:
-- Reservation table; the quoted mixed-case names force callers to
-- double-quote every identifier.
CREATE TABLE "Reservation"
(
"reservationID" serial NOT NULL,
"fromDate" timestamp without time zone NOT NULL DEFAULT '- infinity'::timestamp without time zone,
"toDate" timestamp without time zone NOT NULL DEFAULT '-infinity'::timestamp without time zone,
"staffID" integer NOT NULL DEFAULT 0,
"customerID" integer NOT NULL DEFAULT 0,
"roomID" integer NOT NULL DEFAULT 0,
CONSTRAINT "PK_public.Reservation" PRIMARY KEY ("reservationID"),
CONSTRAINT "FK_public.Reservation_public.Customer_customerID" FOREIGN KEY ("customerID")
REFERENCES "Customer" ("customerID") MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE,
CONSTRAINT "FK_public.Reservation_public.Room_roomID" FOREIGN KEY ("roomID")
REFERENCES "Room" ("roomID") MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE,
CONSTRAINT "FK_public.Reservation_public.Staff_staffID" FOREIGN KEY ("staffID")
REFERENCES "Staff" ("staffID") MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE CASCADE
)
You can't use ' for column names; you need " instead.
select date_part('day', "toDate" - "fromDate") from "Reservation" where "reservationID" = 1;

Can't serialize transient record type postgres

I am trying to make my calculation dynamic based on certain criteria as below, but when I try to send the fields dynamically in to my calculation logic, it fails with the error " Can't serialize transient record type":
Create table statement :
-- Per product/metric calculation rules, stored as a template string in
-- calculation_logic.
create table calculation_t(
Id serial,
product_id integer not null,
metric_id integer not null,
start_date date,
end_date date,
calculation_logic varchar(50),
insert_timestamp timestamp default current_timestamp,
CONSTRAINT calculation_pk PRIMARY KEY(Id),
CONSTRAINT calculation_pid_fk FOREIGN KEY(product_id) REFERENCES Product_T(Product_id),
CONSTRAINT calc_mid_fk FOREIGN KEY(metric_id) REFERENCES metric_T(metric_id)
);
Insert statement :
-- Seed one rule per product/metric pair: '$1-$2' for metric 2, '$1/$2'
-- otherwise.
-- NOTE(review): the comma join is a cross join, producing every
-- product x metric combination — presumably intended here; an explicit
-- CROSS JOIN would make that clearer.
insert into calculation_t(product_id,metric_id,calculation_logic)
select a.product_id,b.metric_id,
(case when b.metric_id=2 then
('$1-$2') else
'$1/$2' end) calc
from product_t a,metric_t b
Select statement which throws the mentioned error :
select *,(1,2,calculation_logic) from calculation_t
Note : I am using Greenplum database.
Try to remove parenthesis form your query:
select *,1,2,calculation_logic from calculation_t
It worked for me.
Thanks,

How can I speed up the following sql query? [duplicate]

This question already exists:
Closed 11 years ago.
Possible Duplicate:
How can I speed up the following update query?
I would like to run the following query in an acceptable time (e.g. max 15 mins) on a modern desktop computer running postgresql 8.4:
-- NOTE(review): cap_options_rule_items appears both as the UPDATE target and
-- (aliased I) in FROM; PostgreSQL treats those as two independent tables, so
-- nothing correlates the join back to the target rows and every row is
-- updated — which is why this is slow (see the corrected query below).
UPDATE cap_options_rule_items
SET cap_option_id = O.id
FROM cap_options_rule_items I
JOIN cap_options_rules R
ON R.id = I.cap_options_rule_id
JOIN cap_options O
ON R.cap_engine_id = O.cap_engine_id
AND O.code = I.cap_option_code;
I would like to know if there are obvious mistakes I'm making in the query or with the choice of indexes.
The tables in the query have the following number of records:
cap_options_rule_item: 2208705
cap_options_rule: 430268
cap_options: 1628188
And the following schema (including indexes)
-- Table: cap_options_rule_items
-- Table: cap_options_rule_items
CREATE TABLE cap_options_rule_items
(
id serial NOT NULL,
cap_options_rule_id integer,
cap_option_code integer,
"primary" boolean,
cap_option_id integer,
CONSTRAINT cap_options_rule_items_pkey PRIMARY KEY (id)
)
WITH (
OIDS=FALSE
);
-- Index: index_cap_options_rule_items_on_cap_option_id
-- NOTE(review): despite its name, this index is built on cap_option_code,
-- not cap_option_id.
CREATE INDEX index_cap_options_rule_items_on_cap_option_id
ON cap_options_rule_items
USING btree (cap_option_code);
-- Index: index_cap_options_rule_items_on_cap_option_rule_id
CREATE INDEX index_cap_options_rule_items_on_cap_option_rule_id
ON cap_options_rule_items
USING btree (cap_options_rule_id);
-- Table: cap_options_rules
CREATE TABLE cap_options_rules
(
id serial NOT NULL,
rule_type character varying(255),
cap_engine_id integer,
CONSTRAINT cap_options_rules_pkey PRIMARY KEY (id)
) WITH ( OIDS=FALSE
);
-- Index: index_cap_options_rules_on_cap_engine_id
CREATE INDEX index_cap_options_rules_on_cap_engine_id
ON cap_options_rules
USING btree (cap_engine_id);
-- Table: cap_options
CREATE TABLE cap_options
( id serial NOT NULL,
description character varying(255),
cap_engine_id integer,
cap_option_category_id integer,
basic_price numeric,
vat numeric,
default_option boolean,
created_at timestamp without time zone,
updated_at timestamp without time zone,
code integer,
CONSTRAINT cap_options_pkey PRIMARY KEY (id)
) WITH ( OIDS=FALSE
);
-- Index: index_code_and_cap_engine_id_on_cap_options
-- Composite index supporting the (code, cap_engine_id) join in the UPDATE.
CREATE INDEX index_code_and_cap_engine_id_on_cap_options
ON cap_options
USING btree (code, cap_engine_id);
Thank you!
Your query is slow because you are updating all the rows in cap_options_rule_items.
I think you really want something like this:
-- Correlated version: the WHERE clause ties the joined rows back to the
-- UPDATE target, so only matching rows are touched.
UPDATE cap_options_rule_items AS item
SET cap_option_id = opt.id
FROM cap_options_rules AS rule
INNER JOIN cap_options AS opt
    ON opt.cap_engine_id = rule.cap_engine_id
WHERE item.cap_options_rule_id = rule.id
  AND item.cap_option_code = opt.code;