create table with system date in Vertica - sql

I am looking to create table from a table something like:
CREATE TABLE as archive.POSTPAID_GSMIS_`date +%Y%m%d%H%M%S`
(
select * from
POSTPAID.STAGE_GS10);
commit;
Wondering if I can even do it in Vertica?
I thought of storing value in a variable like:
\set x 'select now();'
create table :x (int a);
But \echo :x gives me select now();.

See the code below:
dbadmin=> \set date `date +%Y%m%d%H%M%S`
dbadmin=> \echo :date
20170815112242
CREATE TABLE dba.POSTPAID_GSMIS_:date
as
select '1234' as id from dual
;
CREATE TABLE
dbadmin=> select * from dba.POSTPAID_GSMIS_:date;
id
------
1234
(1 row)
Is this what you are looking for?
Append Unix timestamp to a tablename:
dbadmin=> \set env `date +%s`
dbadmin=> \echo :env
1502843933
dbadmin=> create table dba.tbl_:env (id int);
CREATE TABLE
dbadmin=> select * from dba.tbl_1502843933;
id
----
(0 rows)
Also, you can build your variable and use it when creating the table:
dbadmin=> \set var `var="blabla" && echo $var`
dbadmin=> \echo :var
blabla
dbadmin=> create table dba.tbl_:var (id int);
CREATE TABLE
dbadmin=> select * from dba.tbl_blabla;
id
----
(0 rows)

Related

Postgres: reading the results of 'returning *' into a variable

All the examples I have found for the Postgres 'returning' functionality (https://www.postgresql.org/docs/current/dml-returning.html) return values for a single row.
How do I read multiple result rows into a variable?
Executing the following outside a function gives the desired results:
create sequence core.test_id_seq start with 10000;
create table core.test (
test_id integer not null default nextval('core.test_id_seq'),
field integer not null
);
insert into core.test ( field )
select unnest( array[1, 2] ) as id
returning *
;
test_id | field
---------+-------
10000 | 1
10001 | 2
(2 rows)
But I want to read the results into a variable or table to work with:
do $$
declare
recs ??;
begin
create sequence core.test_id_seq start with 10000;
create table core.test (
test_id integer not null default nextval('core.test_id_seq'),
field integer not null
);
insert into core.test ( field )
select unnest( array[1, 2] ) as id
returning * into recs
;
end $$;
Is this possible?
Thanks
You need to use an array of integers:
do $$
declare
new_ids int[];
begin
with new_rows as (
insert into core.test ( field )
select unnest( array[1, 2] ) as id
returning *
)
select array_agg(field)
into new_ids
from new_rows;
... work with the new_ids array ...
end
$$;

SQL: Alter tables returned from a select statement

How to alter the tables and drop constraints returned as strings from a Select statement?
Let's say I have a database that contains tables: tblA, tblB, tblC, among others.
If I run a query to select tables that meet a certain criteria, it returns the following:
+------------+------------+
| table_name | constraints|
+------------+------------+
| tblA | consX |
+------------+------------+
| tblB | consY |
+------------+------------+
| tblC | consZ |
+------------+------------+
Now I want to alter each table and drop their constraint. So for example instead of having to write a select statement, get the results, and type this in:
alter table tblA drop constraint consX;
alter table tblB drop constraint consY;
alter table tblC drop constraint consZ;
How can I do all those steps in one query?
Thanks.
You can do something like this:
with table_list as
( select 'Foo1' tname from dual
union
select 'Foo2' tname from dual
)
select 'alter table ' || tname || ' drop constraint ' || constraint_name || ';'
from user_constraints, table_list
where user_constraints.table_name = tname;
This will create a string built as an ALTER statement for each table/constraint. Add
the table names to the query in the WITH clause. Note that USER_CONSTRAINTS only lists constraints for the current schema.
Using PL/SQL, I would do something like this:
DECLARE
CURSOR c_query IS --REPLACE THIS WITH YOUR ACTUAL QUERY
SELECT tableName, conName
FROM dual;
BEGIN
FOR rec IN c_query LOOP
EXECUTE IMMEDIATE 'ALTER TABLE '||rec.tableName||' DROP CONSTRAINT '||rec.conName;
END LOOP;
END;
/

Get part of the query from a text column

Suppose I have a text column in some table. I can select a value from this column with
(SELECT textcolumn from sometable WHERE ....) and query result will be '5>3'.
Now I want to add this result to my other query.
UPDATE someothertable SET somecolumn WHERE (SELECT textcolumn from sometable WHERE ....)
I want this to act like
UPDATE someothertable SET somecolumn WHERE 5>3
How can I do this in PostgreSQL?
Something like:
do
$$
begin
execute format('UPDATE someothertable
SET somecolumn
WHERE %s',(SELECT textcolumn from sometable WHERE ....));
end;
$$
;
example:
t=# create table ta(i int);
CREATE TABLE
t=# create table tb(t text);
CREATE TABLE
t=# insert into ta select 1;
INSERT 0 1
t=# insert into tb select 'i < 9';
INSERT 0 1
t=# do
$$
begin
execute format('UPDATE ta
SET i = 2
WHERE %s',(SELECT t from tb));
end;
$$
;
DO
t=# select * from ta;
i
---
2
(1 row)
I don't think this is a good idea though...

How to insert same random value into two columns within one SQL Statement (Oracle)?

My example doesn't work:
INSERT INTO test_table
(column_1, column_2)
VALUES (DBMS_CRYPTO.RANDOMBYTES(16), column_1);
I'm not interested in PL/SQL solutions.
One option is to use scalar subquery caching:
INSERT INTO test_table (column_1, column_2)
SELECT random, random FROM
(SELECT (SELECT dbms_crypto.randombytes(16) FROM dual) random FROM dual);
Or using PL/SQL:
DECLARE
random RAW(16) := dbms_crypto.randombytes(16);
BEGIN
INSERT INTO test_table (column_1, column_2) VALUES (random, random);
END;
Use a subquery:
INSERT INTO test_table(column_1, column_2)
SELECT val, val
FROM (SELECT DBMS_CRYPTO.RANDOMBYTES(16) as val FROM dual) x;
Or, this can be written as:
INSERT INTO test_table(column_1, column_2)
WITH x AS (SELECT DBMS_CRYPTO.RANDOMBYTES(16) as val FROM dual)
SELECT val, val
FROM x;
You could create a temp table to store it?
set echo on
create table test_table (column_1 varchar2(100), column_2 varchar2(100));
create global temporary table rando_val (c1 varchar2(100));
insert
into rando_val
values ( dbms_crypto.randombytes(16) );
INSERT INTO test_table (column_1, column_2)
select c1,
c1
from rando_val;
commit;
select *
from test_table;
Not all that elegant, but it should work.
Try this weird query
select /*+ NO_XML_QUERY_REWRITE */ UTL_RAW.CAST_TO_RAW(VAL1),UTL_RAW.CAST_TO_RAW(VAL2) from
xmltable('for $c in . return <r><val1>{$c}</val1><val2>{$c}</val2></r>'
passing (SELECT UTL_RAW.CAST_TO_VARCHAR2(DBMS_CRYPTO.RANDOMBYTES(16)) as val FROM dual)
columns
"VAL1" varchar2(300) path '/r/val1',
"VAL2" varchar2(300) path '/r/val2' )
If you want, you can skip converting varchar2 back to raw in the last step, i.e. omit this:
UTL_RAW.CAST_TO_RAW(VAL1)
This solution works for me:
INSERT INTO test_table(column_1, column_2)
WITH x AS (SELECT /*+ MATERIALIZE */ DBMS_CRYPTO.RANDOMBYTES(16) as val FROM dual)
SELECT val, val
FROM x;
Thanks to Husqvik & Gordon Linoff.

Is there a way to easily select a column of Unix time to dates in the format of 'yyyy-mm-dd' using SQL?

Ideally I would like to use SELECT and have the column of unix time show up as dates. Thanks!
The way I understood your question is that you already have a column with a Unix timestamp and you wish to format it in a SELECT statement:
dbadmin=> CREATE TABLE tms (unix_time int);
CREATE TABLE
dbadmin=> INSERT INTO tms (unix_time) VALUES (EXTRACT(EPOCH FROM SYSDATE));
OUTPUT
--------
1
(1 row)
dbadmin=> INSERT INTO tms (unix_time) VALUES (EXTRACT(EPOCH FROM SYSDATE));
OUTPUT
--------
1
(1 row)
dbadmin=> INSERT INTO tms (unix_time) VALUES (EXTRACT(EPOCH FROM SYSDATE));
OUTPUT
--------
1
(1 row)
dbadmin=> COMMIT;
COMMIT
dbadmin=> SELECT * FROM tms;
unix_time
------------
1437575121
1437575126
1437575128
(3 rows)
dbadmin=> SELECT TO_TIMESTAMP(unix_time) FROM tms;
TO_TIMESTAMP
---------------------
2015-07-22 09:25:21
2015-07-22 09:25:26
2015-07-22 09:25:28
(3 rows)