My (simplified) SQLite tables are like this:
create table customers (
id integer primary key autoincrement,
contact_name text,
billaddr_id integer references addresses(id)
);
create table addresses (
id integer primary key autoincrement,
address text
);
And here are the result classes (generated from the SQL by dbicdump):
Test::DB::Schema::Result::Customer->table("customers");
Test::DB::Schema::Result::Customer->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"contact_name",
{ data_type => "text", is_nullable => 1 },
"billaddr_id",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 1 },
);
Test::DB::Schema::Result::Customer->set_primary_key("id");
Test::DB::Schema::Result::Address->table("addresses");
Test::DB::Schema::Result::Address->add_columns(
"id", { data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"address", { data_type => "text", is_nullable => 1 },
);
Test::DB::Schema::Result::Address->set_primary_key("id");
Test::DB::Schema::Result::Address->has_many(
"customers",
"Test::DB::Schema::Result::Customer",
{ "foreign.billaddr_id" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
Test::DB::Schema::Result::Customer->belongs_to(
"billaddr",
"Test::DB::Schema::Result::Address",
{ id => "billaddr_id" },
{
is_deferrable => 0,
join_type => "LEFT",
on_delete => "NO ACTION",
on_update => "NO ACTION",
},
);
This bit of code:
my $data = {
contact_name => 'Jim Customer',
billaddr => {
address => 'Address...',
},
};
my $newcustomer = $c->schema->resultset('Customer')->create($data);
results in this database update:
SELECT me.id, me.address FROM addresses me WHERE ( ( me.address = ? ) ): 'Address...'
BEGIN WORK
SELECT me.id, me.address FROM addresses me WHERE ( ( me.address = ? ) ): 'Address...'
INSERT INTO addresses ( address ) VALUES ( ? ): 'Address...'
INSERT INTO customers ( billaddr_id, contact_name ) VALUES ( ?, ? ): '10', 'Jim Customer'
COMMIT
Why does it do a select before the insert? Because it's checking to see if an address with the same value of the 'address' column already exists. If it does exist, the ID of that address is reused, like this:
SELECT me.id, me.address FROM addresses me WHERE ( ( me.address = ? ) ): 'Address...'
INSERT INTO customers ( billaddr_id, contact_name ) VALUES ( ?, ? ): '10', 'Another Customer with the same address'
But that's not what I want! I want separate addresses for separate customers, even if they happen to live in the same place at the moment.
How can I make DBIx::Class create a new row in the addresses table every time?
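For contrast, the trace I'm hoping for would look roughly like this (just a sketch, not real output; the bound address id 11 is made up):
BEGIN WORK
INSERT INTO addresses ( address ) VALUES ( ? ): 'Address...'
INSERT INTO customers ( billaddr_id, contact_name ) VALUES ( ?, ? ): '11', 'Another Customer with the same address'
COMMIT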
Thanks to abraxxa's comments, I've been pointed in the right direction and have done more reading and testing with DBIx::Class::Schema.
Generating the tables from the Schema classes, rather than the other way round, seems like the way to go, especially if it will make future upgrades to the database easier.
I've boiled the problem down to the following example code:
Test.pl:
#!/usr/bin/perl
use Test::DB::Schema;
my $schema = Test::DB::Schema->connect(
"dbi:SQLite:dbname=dbicsl_test.db", '', '', {}
);
$schema->deploy({ add_drop_table => 1 } , '.');
$schema->storage->debug(1);
my $data1 = {
text => 'Fred',
table2 => {
text => 'abc',
}
};
my $new1 = $schema->resultset('Table1')->create($data1);
my $data2 = {
text => 'Jim',
table2 => {
text => 'xyz',
}
};
my $new2 = $schema->resultset('Table1')->create($data2);
my $data3 = {
text => 'Emily',
table2 => {
text => 'abc',
}
};
my $new3 = $schema->resultset('Table1')->create($data3);
Test::DB::Schema::Result::Table1.pm:
package Test::DB::Schema::Result::Table1;
use base 'DBIx::Class::Core';
__PACKAGE__->table("table1");
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"text",
{ data_type => "text", is_nullable => 1 },
"table2_id",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
);
__PACKAGE__->set_primary_key("id");
__PACKAGE__->has_one(
table2 =>
"Test::DB::Schema::Result::Table2",
{ 'foreign.id' => 'self.table2_id' },
);
1;
Test::DB::Schema::Result::Table2.pm:
package Test::DB::Schema::Result::Table2;
use base 'DBIx::Class::Core';
__PACKAGE__->table("table2");
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"text",
{ data_type => "text", is_nullable => 0 },
);
__PACKAGE__->set_primary_key("id");
1;
And here's the output:
SELECT me.id, me.text FROM table2 me WHERE ( me.text = ? ): 'abc'
BEGIN WORK
SELECT me.id, me.text FROM table2 me WHERE ( me.text = ? ): 'abc'
INSERT INTO table2 ( text) VALUES ( ? ): 'abc'
INSERT INTO table1 ( table2_id, text) VALUES ( ?, ? ): '1', 'Fred'
COMMIT
SELECT me.id, me.text FROM table2 me WHERE ( me.text = ? ): 'xyz'
BEGIN WORK
SELECT me.id, me.text FROM table2 me WHERE ( me.text = ? ): 'xyz'
INSERT INTO table2 ( text) VALUES ( ? ): 'xyz'
INSERT INTO table1 ( table2_id, text) VALUES ( ?, ? ): '2', 'Jim'
COMMIT
SELECT me.id, me.text FROM table2 me WHERE ( me.text = ? ): 'abc'
INSERT INTO table1 ( table2_id, text) VALUES ( ?, ? ): '1', 'Emily'
So the database now looks like
table1.id table1.text table1.table2_id
1 Fred 1
2 Jim 2
3 Emily 1
table2.id table2.text
1 abc
2 xyz
whereas I expected / hoped for:
table1.id table1.text table1.table2_id
1 Fred 1
2 Jim 2
3 Emily 3
table2.id table2.text
1 abc
2 xyz
3 abc
Why does it reuse 1/abc when I haven't told it to make the table2.text column unique?
Related
I have a JSON structure in a field that looks like this. I'm trying to extract every task in every category; there could be any number of tasks or categories.
I've got part of the way there by extracting a single category, but can't seem to do it for every task in every category.
"tasks": {
"category-business": [
{
"dateCompleted": {
"_seconds": 1653672655,
"_nanoseconds": 791000000
},
"slug": "task-alpha",
"status": "completed"
},
{
"dateCompleted": {
"_seconds": 1654516259,
"_nanoseconds": 796000000
},
"slug": "task-bravo",
"status": "completed"
}
],"category-community": [
{
"dateCompleted": {
"_seconds": 1654709063,
"_nanoseconds": 474000000
},
"slug": "task-papa",
"status": "completed"
},
{
"dateCompleted": {
"_seconds": 1654709841,
"_nanoseconds": 764000000
},
"slug": "task-zebra",
"status": "completed"
}
]}
Here's the query so far
SELECT
*
FROM
(
SELECT
ARRAY(
SELECT
STRUCT(
TIMESTAMP_SECONDS(
CAST(
JSON_EXTRACT_SCALAR(business_tasks, '$.dateCompleted._seconds') AS INT64
)
) AS dateCompleted,
json_extract_scalar(business_tasks, '$.slug') AS task_slug,
json_extract_scalar(business_tasks, '$.status') AS status
)
FROM
UNNEST(
json_extract_array(DATA, '$.tasks.category-business')
) business_tasks
) AS items
FROM
`table`
)
This extracts just the information in the category-business category.
What I'm trying to do is expand category-community and any other children underneath the tasks key. The real data has at least 10 categories and 50 tasks.
I think I need to do another round of UNNEST and json_extract_array, but I can't quite work out the correct order.
Consider the approach below:
create temp function get_keys(input string) returns array<string> language js as """
return Object.keys(JSON.parse(input));
""";
create temp function get_values(input string) returns array<string> language js as """
return Object.values(JSON.parse(input));
""";
create temp function get_leaves(input string) returns string language js as '''
function flattenObj(obj, parent = '', res = {}){
for(let key in obj){
let propName = parent ? parent + '.' + key : key;
if(typeof obj[key] == 'object'){
flattenObj(obj[key], propName, res);
} else {
res[propName] = obj[key];
}
}
return JSON.stringify(res);
}
return flattenObj(JSON.parse(input));
''';
create temp table temp_table as (
select
split(key, '.')[offset(0)] as category,
split(key, '.')[offset(1)] as offset,
split(key, '.')[offset(2)] || ifnull(split(key, '.')[safe_offset(3)], '') as key,
val, format('%t', t) row_id
from your_table t, unnest([struct(get_leaves(json_extract(data, '$.tasks')) as leaves)]),
unnest(get_keys(leaves)) key with offset
join unnest(get_values(leaves)) val with offset using(offset)
);
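To make the dynamic pivot easier to follow, this is roughly the shape temp_table ends up with for the sample document (a sketch, not actual output; row_id omitted and values abbreviated):
-- category            offset  key                        val
-- category-business   0       dateCompleted_seconds      1653672655
-- category-business   0       dateCompleted_nanoseconds  791000000
-- category-business   0       slug                       task-alpha
-- category-business   0       status                     completed
-- ... one row per leaf value, and likewise for category-community
select * from temp_table order by category, offset;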
execute immediate (
select '''
select * except(row_id) from temp_table
pivot (any_value(val) for key in ("''' || keys || '"))'
from (
select string_agg(key, '","') keys
from (select distinct key from temp_table)
)
);
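The execute immediate step assembles the pivot column list from whatever distinct keys exist in temp_table, so nothing is hard-coded. For the sample document, the statement it builds and runs would look roughly like this (a sketch; key order may differ):
select * except(row_id) from temp_table
pivot (any_value(val) for key in ("dateCompleted_seconds", "dateCompleted_nanoseconds", "slug", "status"))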
If applied to the sample data in your question, the output has one row per task, with the flattened key names pivoted into columns.
A pure-SQL alternative (no JS UDFs or scripting):
with category_level as (
select
coalesce(
json_query_array(DATA.tasks[a], '$.category-business')
, json_query_array(DATA.tasks[a], '$.category-community')
, json_query_array(DATA.tasks[a], '$.category-3')
, json_query_array(DATA.tasks[a], '$.category-4')
, json_query_array(DATA.tasks[a], '$.category-5')
, json_query_array(DATA.tasks[a], '$.category-6')
, json_query_array(DATA.tasks[a], '$.category-7')
, json_query_array(DATA.tasks[a], '$.category-8')
, json_query_array(DATA.tasks[a], '$.category-9')
, json_query_array(DATA.tasks[a], '$.category-10')
) category_array
from your_table
left join unnest(generate_array(0, 100)) a
where DATA.tasks[a] is not null
)
select
timestamp_seconds(cast(json_extract_scalar(b.dateCompleted._seconds) as int64)) dateCompleted
, json_extract_scalar(b.slug) slug
, json_extract_scalar(b.status) status
from category_level
left join unnest(category_array) b
https://console.cloud.google.com/bigquery?sq=1013309549723:fe8b75122e5b4b549e8081df99584c81
new version:
select
timestamp_seconds(cast(regexp_extract_all(to_json_string(json_extract(DATA,'$.tasks')), r'"_seconds":(\d*)')[offset(a)] as int64)) dateCompleted
, regexp_extract_all(to_json_string(json_extract(DATA,'$.tasks')), r'"slug":"([a-z\-]*)"')[offset(a)] task_slug
, regexp_extract_all(to_json_string(json_extract(DATA,'$.tasks')), r'"status":"([a-z\-]*)"')[offset(a)] status
from your_table
join unnest(generate_array(0,-1+array_length(regexp_extract_all(to_json_string(json_extract(DATA,'$.tasks')), r'"slug":"([a-z\-]*)"')))) a
https://console.cloud.google.com/bigquery?sq=1013309549723:9f43bd653ba14589b31a1f5673adcda7
I have an SQL query to get users and a total count:
await db.query({
text: `with cte as
(select "id", "createdAt", "deletedAt", "role", "email", "name", "group" from "admin"
where (lower("name") like '%' || lower($1) || '%'))
select * from (table cte
order by
case when $2 = 'desc' then "createdAt" end desc,
case when $2 = 'asc' then "createdAt" end asc
limit $3
offset $4) sub
right join (select count(*) from cte) c(total) on true`,
values: [search, createdAt, limit, offset]
})
And this query returns users as an array of objects, with the total repeated in each user object:
[
{
id: '135e8d05-4723-4dd7-9ae0-6e10626799f3',
createdAt: 2022-03-27T08:34:37.636Z,
deletedAt: null,
role: 'root',
email: 'test@gmail.com',
name: 'Test Name',
group: 'Test Group',
total: '4'
}
]
What should I do to get something like this?
{
total: 4,
users: [
{
id: '135e8d05-4723-4dd7-9ae0-6e10626799f3',
createdAt: 2022-03-27T08:34:37.636Z,
deletedAt: null,
role: 'root',
email: 'test@gmail.com',
name: 'Test Name',
group: 'Test Group',
}
]
}
As I understand it, you want to generate the JSON on the PostgreSQL side, but your query doesn't include any JSON generation. Here are two samples:
Sample 1:
with cte as
(
select "id", "createdAt", "deletedAt", "role", "email", "name", "group" from "admin"
where (lower("name") like '%' || lower($1) || '%')
)
select jsonb_agg(row_to_json(t1)) from (
select * from (table cte
order by
case when $2 = 'desc' then "createdAt" end desc,
case when $2 = 'asc' then "createdAt" end asc
limit $3
offset $4) sub
right join (select count(*) from cte) c(total) on true
) t1
This query returns:
[
{
id: '135e8d05-4723-4dd7-9ae0-6e10626799f3',
createdAt: 2022-03-27T08:34:37.636Z,
deletedAt: null,
role: 'root',
email: 'test@mail.ru',
name: 'Test Name',
group: 'Test Group',
total: '4'
}
]
Sample 2 (the one you need):
with cte as
(
select "id", "createdAt", "deletedAt", "role", "email", "name", "group" from "admin"
where (lower("name") like '%' || lower($1) || '%')
)
select row_to_json(t1) from (
select
c.total,
jsonb_agg(row_to_json(sub)) as "users"
from (table cte
order by
case when $2 = 'desc' then "createdAt" end desc,
case when $2 = 'asc' then "createdAt" end asc
limit $3
offset $4) sub
right join (select count(*) from cte) c(total) on true
group by c.total
) t1
This query returns the format you need:
{
total: 4,
users: [
{
id: '135e8d05-4723-4dd7-9ae0-6e10626799f3',
createdAt: 2022-03-27T08:34:37.636Z,
deletedAt: null,
role: 'root',
email: 'test@mail.ru',
name: 'Test Name',
group: 'Test Group',
}
]
}
You can use row_to_json and json_agg to convert all results to a JSON list of objects.
This won't give you exactly what you desire, but close. The query below should return results in the format [{total: xxx, users: [user-records]}], so in your application code you have to get the first row returned by the database query.
with cte as (
select
"id"
, "createdAt"
, "deletedAt"
, "role"
, "email"
, "name"
, "group"
from "admin"
where (lower("name") like '%' || lower($1) || '%')
)
select *
from (
  select
    json_agg(row_to_json(page.*)) as users
  from (
    table cte
    order by
      case when $2 = 'desc' then "createdAt" end desc,
      case when $2 = 'asc' then "createdAt" end asc
    limit $3
    offset $4
  ) page
) sub
right join (
select count(*) from cte
) c(total)
on true
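Another option, not taken from either answer above, is to build the object directly with json_build_object and json_agg; a sketch assuming the same cte and bind parameters as before:
with cte as (
  select "id", "createdAt", "deletedAt", "role", "email", "name", "group"
  from "admin"
  where lower("name") like '%' || lower($1) || '%'
)
select json_build_object(
  'total', (select count(*) from cte),
  -- coalesce so an empty page still yields [] rather than null
  'users', coalesce(json_agg(row_to_json(page.*)), '[]'::json)
) as result
from (
  table cte
  order by
    case when $2 = 'desc' then "createdAt" end desc,
    case when $2 = 'asc' then "createdAt" end asc
  limit $3
  offset $4
) page;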
I have a large JSON file of data which I want to put into my database. Some of the objects are duplicates, so I want to update the data in case the row is already in the database. Here is my code:
const FILE_PATH = path.join(__dirname, "../../files/apps.json");
const columns = [
"name",
"description",
"ext_id"
];
const myFile = fs.readFileSync(FILE_PATH, { encoding: "utf-8" });
const appData = await models.sequelize.query(
`
INSERT INTO data (${columns.join(", ")})
SELECT ${columns.join(", ")}
FROM (:path)
ON CONFLICT (ext_id)
DO UPDATE SET
${columns.map(col => `${col} = EXCLUDED.${col}`).join(", ")}
RETURNING ext_id;
`,
{ replacements: { path: FILE_PATH } }
);
As you can see, I want to read the file directly and put it into the database. I use a mapper called sequelize, but use a raw query in this case. My immediate problem is that I get this error:
syntax error at or near "'/home/blub/filePath'"
I don't really know how I should specify the path. I tried to pass it in directly, but then the program complained about the /. Any help here? In addition, I am also not sure whether the query is syntactically correct.
Here is a solution using a PostgreSQL CTE.
Versions:
"sequelize": "^5.21.3"
postgres:9.6
apps.json:
[
{
"name": "app-a",
"description": "app a desc",
"ext_id": 1
},
{
"name": "app-b",
"description": "app b desc",
"ext_id": 2
},
{
"name": "app-c",
"description": "app c desc",
"ext_id": 3
}
]
index.ts:
import { sequelize } from '../../db';
import { Model, DataTypes, QueryTypes } from 'sequelize';
import fs from 'fs';
import path from 'path';
class Data extends Model {}
Data.init(
{
name: DataTypes.STRING,
description: DataTypes.STRING,
ext_id: {
type: DataTypes.INTEGER,
unique: true,
},
},
{ sequelize, tableName: 'data' },
);
(async function test() {
try {
await sequelize.sync({ force: true });
const FILE_PATH = path.join(__dirname, './apps.json');
const columns = ['name', 'description', 'ext_id'];
const myFile = fs.readFileSync(FILE_PATH, { encoding: 'utf-8' });
const appData = await sequelize.query(
`
with app_json(doc) as (
values ('${myFile}'::json)
)
insert into data (${columns.join(', ')})
select ${columns.join(', ')}
from app_json l
cross join lateral json_populate_recordset(null::data, doc) as p
on conflict (ext_id) do update
set ${columns.map((col) => `${col} = EXCLUDED.${col}`).join(', ')}
returning ext_id;
`,
{ type: QueryTypes.INSERT },
);
console.log(appData);
} catch (error) {
console.log(error);
} finally {
await sequelize.close();
}
})();
The execution result:
Executing (default): DROP TABLE IF EXISTS "data" CASCADE;
Executing (default): DROP TABLE IF EXISTS "data" CASCADE;
Executing (default): CREATE TABLE IF NOT EXISTS "data" ("id" SERIAL , "name" VARCHAR(255), "description" VARCHAR(255), "ext_id" INTEGER UNIQUE, PRIMARY KEY ("id"));
Executing (default): SELECT i.relname AS name, ix.indisprimary AS primary, ix.indisunique AS unique, ix.indkey AS indkey, array_agg(a.attnum) as column_indexes, array_agg(a.attname) AS column_names, pg_get_indexdef(ix.indexrelid) AS definition FROM pg_class t, pg_class i, pg_index ix, pg_attribute a WHERE t.oid = ix.indrelid AND i.oid = ix.indexrelid AND a.attrelid = t.oid AND t.relkind = 'r' and t.relname = 'data' GROUP BY i.relname, ix.indexrelid, ix.indisprimary, ix.indisunique, ix.indkey ORDER BY i.relname;
Executing (default): with app_json(doc) as (
values ('[
{
"name": "app-a",
"description": "app a desc",
"ext_id": 1
},
{
"name": "app-b",
"description": "app b desc",
"ext_id": 2
},
{
"name": "app-c",
"description": "app c desc",
"ext_id": 3
}
]'::json)
)
insert into data (name, description, ext_id)
select name, description, ext_id
from app_json l
cross join lateral json_populate_recordset(null::data, doc) as p
on conflict (ext_id) do update
set name = EXCLUDED.name, description = EXCLUDED.description, ext_id = EXCLUDED.ext_id
returning ext_id;
[ [ { ext_id: 1 }, { ext_id: 2 }, { ext_id: 3 } ], 3 ]
Check the data records in the database:
id name description ext_id
1 app-a app a desc 1
2 app-b app b desc 2
3 app-c app c desc 3
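One detail worth flagging: the snippet above interpolates the file contents straight into the SQL text, which breaks as soon as the JSON contains a single quote. The same statement can take the document as a bound parameter instead; a sketch, where :doc is a hypothetical named replacement whose value is the file contents:
with app_json(doc) as (
  values (cast(:doc as json))
)
insert into data (name, description, ext_id)
select name, description, ext_id
from app_json l
cross join lateral json_populate_recordset(null::data, doc) as p
on conflict (ext_id) do update
  set name = excluded.name,
      description = excluded.description,
      ext_id = excluded.ext_id
returning ext_id;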
Is there a way to create a JSON object in Oracle for parent-child relationship data, for example an organizational structure? The table contains:
EmpId Name Title ManagerId
1 John GM 0
2 Smith Manager 1
3 Jason Manager 1
4 Will IP1 3
5 Jade AM 3
6 Mark IP2 5
7 Jane AM2 5
8 Tamara M1 1
9 Dory M2 1
Something like the JSON object below is expected:
{
'name': 'John',
'title': 'GM',
'children': [
{ 'name': 'Smith', 'title': 'manager' },
{ 'name': 'Jason', 'title': 'manager',
'children': [
{ 'name': 'Will', 'title': 'IP1' },
{ 'name': 'Jade', 'title': 'AM',
'children': [
{ 'name': 'Mark', 'title': 'IP2' },
{ 'name': 'Jane', 'title': 'AM2' }
]
}
]
},
{ 'name': 'Tamara', 'title': 'M1' },
{ 'name': 'Dory', 'title': 'M2' }
]
}
Oracle Database 12.2 does have a number of JSON generation functions, but these are of limited use here: you need to build up the document recursively, which I believe requires a bit of hand-crafting.
First use a recursive query to create the org chart, recording which level each person is at in the hierarchy.
Then build the JSON by:
If the level of the next row is greater than the current one, the employee is a manager and you need to start a child array; otherwise return a JSON object for the current row
If the current row is the last in the tree, you need to close N arrays and objects. N is how deep the row is in the tree minus one.
Otherwise if the next row is a lower level than the current, you need to close ( current level - next level ) arrays and objects
Then if the next level equals or is less than the current, add a comma
Which gives something like:
create table t (
EmpId int,
Name varchar2(10),
Title varchar2(10),
ManagerId int
);
insert into t values (1, 'John', 'GM' , 0 );
insert into t values (2, 'Smith', 'Manager' , 1 );
insert into t values (3, 'Jason', 'Manager' , 1 );
insert into t values (4, 'Will', 'IP1' , 3 );
insert into t values (5, 'Jade', 'AM' , 3 );
insert into t values (6, 'Mark', 'IP2' , 5 );
insert into t values (7, 'Jane', 'AM2' , 5 );
insert into t values (8, 'Tamar', 'M1' , 1 );
insert into t values (9, 'Dory', 'M2' , 1 );
commit;
with chart (
empid, managerid, name, title, lvl
) as (
select empid, managerid,
name, title, 1 lvl
from t
where empid = 1
union all
select t.empid, t.managerid,
t.name, t.title,
lvl + 1 lvl
from chart c
join t
on c.empid = t.managerid
) search depth first by empid set seq,
jdata as (
select case
/* The employee has reports */
when lead ( lvl ) over ( order by seq ) > lvl then
'{"name": "' || name ||
'", "title": "' || title ||
'", "children": ['
else
json_object ( 'name' value name, 'title' value title )
end ||
case
/* Close arrays & objects */
when lead ( lvl ) over ( order by seq ) is null then
lpad ( ']}', ( lvl - 1 ) * 2, ']}' )
when lead ( lvl ) over ( order by seq ) < lvl then
lpad ( ']}', ( lvl - lead ( lvl ) over ( order by seq ) ) * 2, ']}' )
end ||
case
/* Add closing commas */
when lead ( lvl ) over ( order by seq ) <= lvl then
','
end j,
lead ( lvl ) over ( order by seq ) nlvl,
seq, lvl
from chart
)
select json_query (
listagg ( j )
within group ( order by seq ),
'$' returning varchar2 pretty
) chart_json
from jdata;
CHART_JSON
{
"name" : "John",
"title" : "GM",
"children" :
[
{
"name" : "Smith",
"title" : "Manager"
},
{
"name" : "Jason",
"title" : "Manager",
"children" :
[
{
"name" : "Will",
"title" : "IP1"
},
{
"name" : "Jade",
"title" : "AM",
"children" :
[
{
"name" : "Mark",
"title" : "IP2"
},
{
"name" : "Jane",
"title" : "AM2"
}
]
}
]
},
{
"name" : "Tamar",
"title" : "M1"
},
{
"name" : "Dory",
"title" : "M2"
}
]
}
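Not part of the answer above, but because the document is assembled by string concatenation it can be worth checking that the result really is well-formed. A minimal sketch using Oracle's IS JSON condition, assuming the full query above is wrapped in a CTE hypothetically named org_chart that exposes chart_json:
with org_chart as (
  -- stand-in literal so the sketch is self-contained; replace with the chart/jdata query above
  select '{"name":"John","title":"GM","children":[]}' as chart_json from dual
)
select case when chart_json is json then 'well-formed' else 'not valid JSON' end as json_check
from org_chart;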
I'm trying to use a DBIx::Class ResultSet to build:
SELECT commitment_sub_activity_id, SUM(commitment_amount_in_usd)
FROM query_commitments_financials
GROUP BY (commitment_sub_activity_id)
Here's the Perl code I'm trying to use to do this:
my $sub_activity_commitments = $schema->resultset('QueryCommitmentsFinancial')->search(undef,
{
select => [
'commitment_sub_activity_id',
{ sum => 'commitment_amount_in_usd' }
],
as => [qw/commitment_sub_activity_id total_commitment_in_usd/],
'group_by' => [qw/commitment_sub_activity_id/],
});
As I understand it, the select attribute should prevent any unlisted fields from appearing in the SELECT statement. However, the SQL statement DBIx::Class creates comes out as:
SELECT
me.source_id, me.commitment_id,
me.commitment_obligation_id, me.commitment_sub_activity_id,
me.commitment_task_code, me.commitment_status,
me.commitment_date, me.commitment_currency_id,
me.commitment_amount, me.exchange_rate,
me.commitment_amount_in_usd, me.commitment_sub_activity_id,
SUM( commitment_amount_in_usd )
FROM query_commitments_financials me
GROUP BY commitment_sub_activity_id
This query causes my RDBMS to throw an error because those unexpected columns would all need to be included in the GROUP BY.
What is the correct way to get the DBIx::Class ResultSet to build this aggregate query?
UPDATE: Providing the Result class definition of QueryCommitmentsFinancial as requested. I used DBIx::Class::Schema::Loader to create it, so I removed the POD.
use utf8;
package IPMS::Schema::Result::QueryCommitmentsFinancial;
use strict;
use warnings;
use base 'DBIx::Class::Core';
__PACKAGE__->load_components("InflateColumn::DateTime");
__PACKAGE__->table_class("DBIx::Class::ResultSource::View");
__PACKAGE__->table("query_commitments_financials");
__PACKAGE__->add_columns(
"source_id",
{ data_type => "nvarchar", is_nullable => 1, size => 52 },
"commitment_id",
{ data_type => "nvarchar", is_nullable => 1, size => 51 },
"commitment_obligation_id",
{ data_type => "nvarchar", is_nullable => 1, size => 45 },
"commitment_sub_activity_id",
{ data_type => "integer", is_nullable => 0 },
"commitment_task_code",
{ data_type => "nchar", is_nullable => 0, size => 7 },
"commitment_status",
{ data_type => "nvarchar", is_nullable => 0, size => 10 },
"commitment_date",
{ data_type => "datetime", is_nullable => 1 },
"commitment_currency_id",
{ data_type => "integer", is_nullable => 0 },
"commitment_amount",
{ data_type => "money", is_nullable => 1 },
"exchange_rate",
{ data_type => "double precision", is_nullable => 1 },
"commitment_amount_in_usd",
{ data_type => "money", is_nullable => 1 },
);
1;