Liquibase error: "relation does not exist" when creating main table and table with foreign key in one change set - liquibase

I'm trying to create two tables with a one-to-many relation using Liquibase and PostgreSQL in a Java Spring Boot application. There is only one entry in the changelog.
When starting the application I receive this error:
liquibase.exception.DatabaseException: ERROR: relation "public.main" does not exist [Failed SQL: (0) CREATE TABLE public.relation (id BIGINT GENERATED BY DEFAULT AS IDENTITY NOT NULL, main_id BIGINT NOT NULL, CONSTRAINT "PK_relation_id" PRIMARY KEY (id), CONSTRAINT "FK_relation_main" FOREIGN KEY (main_id) REFERENCES public.main(id), UNIQUE (id))]
Table definitions:
{
  "databaseChangeLog": [
    {
      "changeSet": {
        "id": "initial-structure-create",
        "author": "tcyborowski",
        "changes": [
          {
            "createTable": {
              "tableName": "main",
              "columns": [
                {
                  "column": {
                    "name": "id",
                    "type": "bigint",
                    "autoIncrement": true,
                    "constraints": {
                      "primaryKey": true,
                      "primaryKeyName": "PK_main_id",
                      "nullable": false,
                      "unique": true
                    }
                  }
                },
                {
                  "column": {
                    "name": "name",
                    "type": "varchar(255)",
                    "constraints": {
                      "nullable": false,
                      "unique": false
                    }
                  }
                }
              ]
            },
            "createTable": {
              "tableName": "relation",
              "columns": [
                {
                  "column": {
                    "name": "id",
                    "type": "bigint",
                    "autoIncrement": true,
                    "constraints": {
                      "primaryKey": true,
                      "primaryKeyName": "PK_relation_id",
                      "nullable": false,
                      "unique": true
                    }
                  }
                },
                {
                  "column": {
                    "name": "main_id",
                    "type": "bigint",
                    "constraints": {
                      "nullable": false,
                      "unique": false,
                      "referencedTableSchemaName": "public",
                      "referencedTableName": "main",
                      "referencedColumnNames": "id",
                      "foreignKeyName": "FK_relation_main"
                    }
                  }
                }
              ]
            }
          }
        ]
      }
    }
  ]
}
Liquibase version 4.3.3. The same problem occurs with an H2 in-memory database.
If I split this JSON table declaration into two JSON changelog files and run the first table creation followed by the second, the tables are created without problems and with the correct relation.
If I try to run this on a clean database I get the same error as above:
{
  "databaseChangeLog": [
    {
      "include": {
        "file": "db/changelog/partials/initial-structure.json"
      },
      "include": {
        "file": "db/changelog/partials/initial-structure2.json"
      }
    }
  ]
}
What could be causing the problem when running this in one go?

The problem is that a changeset is executed transactionally: the changes are committed at the end of the changeset.
My suggestion is to create the two tables in two different changesets.
Liquibase attempts to execute each changeset in a transaction that is committed at the end, or rolled back if there is an error. Some databases will auto-commit statements which interferes with this transaction setup and could lead to an unexpected database state. Therefore, it is best practice to have just one change per changeset unless there is a group of non-auto-committing changes that you want to apply as a transaction such as inserting data.
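For illustration, a minimal sketch of that two-changeset layout (the changeset ids here are made up, and the column lists are abridged from the original):

{
  "databaseChangeLog": [
    {
      "changeSet": {
        "id": "create-main-table",
        "author": "tcyborowski",
        "changes": [
          {
            "createTable": {
              "tableName": "main",
              "columns": [
                {
                  "column": {
                    "name": "id",
                    "type": "bigint",
                    "autoIncrement": true,
                    "constraints": {
                      "primaryKey": true,
                      "nullable": false
                    }
                  }
                }
              ]
            }
          }
        ]
      }
    },
    {
      "changeSet": {
        "id": "create-relation-table",
        "author": "tcyborowski",
        "changes": [
          {
            "createTable": {
              "tableName": "relation",
              "columns": [
                {
                  "column": {
                    "name": "main_id",
                    "type": "bigint",
                    "constraints": {
                      "nullable": false,
                      "referencedTableName": "main",
                      "referencedColumnNames": "id",
                      "foreignKeyName": "FK_relation_main"
                    }
                  }
                }
              ]
            }
          }
        ]
      }
    }
  ]
}

This way each table lands in its own transaction, and the foreign key to main is created only after the first changeset has been committed.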

OK, I found my mistake after changing the logging level to debug.
Both of my JSON files above contain the same error: a single JSON object holds two identical keys (two "createTable" keys in one changes entry, and two "include" keys in one changelog entry). Duplicate keys are not valid JSON, so the parser silently kept only the last one, which caused only the last action to be executed.
The corrected JSON files are below, with each include and each createTable wrapped in its own object:
{
  "databaseChangeLog": [
    {
      "include": {
        "file": "db/changelog/partials/initial-structure.json"
      }
    },
    {
      "include": {
        "file": "db/changelog/partials/initial-structure2.json"
      }
    }
  ]
}
{
  "databaseChangeLog": [
    {
      "changeSet": {
        "id": "initial-structure-create",
        "author": "tcyborowski",
        "changes": [
          {
            "createTable": {
              "tableName": "main",
              "columns": [
                {
                  "column": {
                    "name": "id",
                    "type": "bigint",
                    "autoIncrement": true,
                    "constraints": {
                      "primaryKey": true,
                      "primaryKeyName": "PK_main_id",
                      "nullable": false,
                      "unique": true
                    }
                  }
                },
                {
                  "column": {
                    "name": "name",
                    "type": "varchar(255)",
                    "constraints": {
                      "nullable": false,
                      "unique": false
                    }
                  }
                }
              ]
            }
          },
          {
            "createTable": {
              "tableName": "relation",
              "columns": [
                {
                  "column": {
                    "name": "id",
                    "type": "bigint",
                    "autoIncrement": true,
                    "constraints": {
                      "primaryKey": true,
                      "primaryKeyName": "PK_relation_id",
                      "nullable": false,
                      "unique": true
                    }
                  }
                },
                {
                  "column": {
                    "name": "main_id",
                    "type": "bigint",
                    "constraints": {
                      "nullable": false,
                      "unique": false,
                      "referencedTableSchemaName": "public",
                      "referencedTableName": "main",
                      "referencedColumnNames": "id",
                      "foreignKeyName": "FK_relation_main"
                    }
                  }
                }
              ]
            }
          }
        ]
      }
    }
  ]
}

Related

How to set Datatype in Additional Column in ADF

I need to set the datatype for an Additional Column with Dynamic Content in a Sink in ADF.
By default it takes nvarchar(max) from the JSON object, but I need bigint.
Below is the JSON object that creates the table with the additional column:
{
  "source": {
    "type": "SqlServerSource",
    "additionalColumns": [
      {
        "name": "ApplicationId",
        "value": 3604509277250831000
      }
    ],
    "sqlReaderQuery": "SELECT * from Table A",
    "queryTimeout": "02:00:00",
    "isolationLevel": "ReadUncommitted",
    "partitionOption": "None"
  },
  "sink": {
    "type": "AzureSqlSink",
    "writeBehavior": "insert",
    "sqlWriterUseTableLock": false,
    "tableOption": "autoCreate",
    "disableMetricsCollection": false
  },
  "enableStaging": false,
  "translator": {
    "type": "TabularTranslator",
    "typeConversion": true,
    "typeConversionSettings": {
      "allowDataTruncation": true,
      "treatBooleanAsNumber": false
    }
  }
}
(Screenshots omitted: the ADF configuration and the created table's column datatype in the database.)
If I convert the dynamic content into an int:
@int(pipeline().parameters.application.applicationId)
then I get a warning (screenshot omitted).
Please let me know how I can set the datatype in ADF.
I also tried the same and got the same result: by default it takes nvarchar(max) from the JSON object, but bigint is needed.
To resolve this, after you add the additional column in your source dataset, go to Mapping and click Import schema. It imports the schema of the source, including the additional column (the column marked 'additional' after its name in the mapping is the additional column). Change that column's type to Int64 (screenshot omitted).
After this, run your pipeline; it will create the additional column with data type bigint.
{
  "name": "pipeline2",
  "properties": {
    "activities": [
      {
        "name": "Copy data1",
        "type": "Copy",
        "dependsOn": [],
        "policy": {
          "timeout": "0.12:00:00",
          "retry": 0,
          "retryIntervalInSeconds": 30,
          "secureOutput": false,
          "secureInput": false
        },
        "userProperties": [],
        "typeProperties": {
          "source": {
            "type": "JsonSource",
            "additionalColumns": [
              {
                "name": "name",
                "value": {
                  "value": "@pipeline().parameters.demo.age",
                  "type": "Expression"
                }
              }
            ],
            "storeSettings": {
              "type": "AzureBlobFSReadSettings",
              "recursive": true,
              "enablePartitionDiscovery": false
            },
            "formatSettings": {
              "type": "JsonReadSettings"
            }
          },
          "sink": {
            "type": "AzureSqlSink",
            "writeBehavior": "insert",
            "sqlWriterUseTableLock": false,
            "tableOption": "autoCreate",
            "disableMetricsCollection": false
          },
          "enableStaging": false,
          "translator": {
            "type": "TabularTranslator",
            "mappings": [
              {
                "source": {
                  "path": "$['taskId']"
                },
                "sink": {
                  "name": "taskId",
                  "type": "String"
                }
              },
              {
                "source": {
                  "path": "$['taskObtainedScore']"
                },
                "sink": {
                  "name": "taskObtainedScore",
                  "type": "String"
                }
              },
              {
                "source": {
                  "path": "$['multiInstance']"
                },
                "sink": {
                  "name": "multiInstance",
                  "type": "String"
                }
              },
              {
                "source": {
                  "path": "$['name']"
                },
                "sink": {
                  "name": "name",
                  "type": "Int64"
                }
              }
            ],
            "collectionReference": ""
          }
        },
        "inputs": [
          {
            "referenceName": "Json1",
            "type": "DatasetReference"
          }
        ],
        "outputs": [
          {
            "referenceName": "AzureSqlTable1",
            "type": "DatasetReference"
          }
        ]
      }
    ],
    "parameters": {
      "demo": {
        "type": "object",
        "defaultValue": {
          "name": "John",
          "age": 30,
          "isStudent": true
        }
      }
    },
    "annotations": []
  }
}
(Output screenshot omitted.)

Run dynamic stored procedures from an Azure logic app

I want to run multiple stored procedures from a logic app against an Azure SQL database, with the names of the stored procedures calculated from a variable.
I have a variable with the values (API_test1_SP1, API_test2_SP1, API_test3_SP1).
In a loop, I want to run the stored procedures API_test1, API_test2 and API_test3.
That is, I want to remove _SP1 from the variable values and run the stored procedures (API_test1, API_test2, API_test3) against the Azure SQL database.
I tried the following expression without luck:
@{concat(API_,slice(@{variables('variable_name')},1,lastIndexOf('_')))}
Is it possible to run stored procedures like this in a logic app?
You can use the below expression to achieve your requirement.
first(split(variables('Array')?[iterationIndexes('Until')],'_SP1'))
To reproduce this, I used the below flow in my logic app (results screenshot omitted).
Below is the code view of my logic app:
{
  "definition": {
    "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
    "actions": {
      "Initialize_variable_-_array": {
        "inputs": {
          "variables": [
            {
              "name": "Array",
              "type": "array",
              "value": [
                "API_test1_SP1",
                "API_test2_SP1",
                "API_test3_SP1"
              ]
            }
          ]
        },
        "runAfter": {},
        "type": "InitializeVariable"
      },
      "Initialize_variable_-_loop": {
        "inputs": {
          "variables": [
            {
              "name": "loop",
              "type": "integer",
              "value": 0
            }
          ]
        },
        "runAfter": {
          "Initialize_variable_-_array": [
            "Succeeded"
          ]
        },
        "type": "InitializeVariable"
      },
      "Until": {
        "actions": {
          "Compose": {
            "inputs": "@first(split(variables('Array')?[iterationIndexes('Until')],'_SP1'))",
            "runAfter": {},
            "type": "Compose"
          },
          "Increment_variable": {
            "inputs": {
              "name": "loop",
              "value": 1
            },
            "runAfter": {
              "Compose": [
                "Succeeded"
              ]
            },
            "type": "IncrementVariable"
          }
        },
        "expression": "@equals(variables('loop'), length(variables('Array')))",
        "limit": {
          "count": 60,
          "timeout": "PT1H"
        },
        "runAfter": {
          "Initialize_variable_-_loop": [
            "Succeeded"
          ]
        },
        "type": "Until"
      }
    },
    "contentVersion": "1.0.0.0",
    "outputs": {},
    "parameters": {},
    "triggers": {
      "manual": {
        "inputs": {
          "schema": {}
        },
        "kind": "Http",
        "type": "Request"
      }
    }
  },
  "parameters": {}
}
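To actually run the procedure inside the loop, the composed name can be passed to the SQL connector's Execute stored procedure action. A minimal sketch of such an action follows; the connection name, the <server>/<database> placeholders, and the exact path format are assumptions, so compare against the code view of your own SQL connector action:

"Execute_stored_procedure_(V2)": {
  "type": "ApiConnection",
  "inputs": {
    "host": {
      "connection": {
        "name": "@parameters('$connections')['sql']['connectionId']"
      }
    },
    "method": "post",
    "path": "/v2/datasets/@{encodeURIComponent(encodeURIComponent('<server>.database.windows.net'))},@{encodeURIComponent(encodeURIComponent('<database>'))}/procedures/@{encodeURIComponent(encodeURIComponent(outputs('Compose')))}"
  },
  "runAfter": {
    "Compose": [
      "Succeeded"
    ]
  }
}

The point being demonstrated is outputs('Compose') as the procedure name: each iteration executes the stripped name produced by the split expression.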

Logic App with Azure Monitor and conditions

I created a workflow with Logic Apps. The goal is to notify a team when a patch is missing for a VM. I use Azure Monitor in the logic app to run the query. After the Azure Monitor action I added a condition to check whether the query table is empty or has data: if the table is empty the condition is true, so it doesn't send a notification, and when it is false, it sends one.
When I run it, I get a logic error: normally the table has no data, but after the condition the function empty([my_table]) returns false and sends me a notification with the result ("The query yielded no data").
What is the problem?
Thanks
Based on the above requirement, we created the logic app and tested it in our environment; it works fine. Note that instead of calling empty() on the result, the condition compares the length of the query result body to 0, and the notification is sent in the else branch.
Below is the complete logic app code:
{
  "definition": {
    "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
    "actions": {
      "Condition_2": {
        "actions": {
          "Terminate_2": {
            "inputs": {
              "runStatus": "Cancelled"
            },
            "runAfter": {},
            "type": "Terminate"
          }
        },
        "else": {
          "actions": {
            "Send_an_email_(V2)_2": {
              "inputs": {
                "body": {
                  "Body": "<p>@{base64ToString(body('Run_query_and_visualize_results')?['body'])}</p>",
                  "Subject": "list of vm from update management ",
                  "To": "<UserEmailId>"
                },
                "host": {
                  "connection": {
                    "name": "@parameters('$connections')['office365']['connectionId']"
                  }
                },
                "method": "post",
                "path": "/v2/Mail"
              },
              "runAfter": {},
              "type": "ApiConnection"
            }
          }
        },
        "expression": {
          "and": [
            {
              "equals": [
                "@length(body('Run_query_and_visualize_results')?['body'])",
                0
              ]
            }
          ]
        },
        "runAfter": {
          "Run_query_and_visualize_results": [
            "Succeeded"
          ]
        },
        "type": "If"
      },
      "Run_query_and_visualize_results": {
        "inputs": {
          "body": "Update\n| where Classification == 'Security Updates' or Classification == 'Critical Updates'\n| where UpdateState == 'Needed'\n| summarize by Computer,ResourceGroup,Classification,UpdateState\n|sort by Computer",
          "host": {
            "connection": {
              "name": "@parameters('$connections')['azuremonitorlogs']['connectionId']"
            }
          },
          "method": "post",
          "path": "/visualizeQuery",
          "queries": {
            "resourcegroups": "<Resource_group_Name>",
            "resourcename": "<log_analytics_workspace_name>",
            "resourcetype": "Log Analytics Workspace",
            "subscriptions": "<subcription_id>",
            "timerange": "Last 12 hours",
            "visType": "Html Table"
          }
        },
        "runAfter": {},
        "type": "ApiConnection"
      }
    },
    "contentVersion": "1.0.0.0",
    "outputs": {},
    "parameters": {
      "$connections": {
        "defaultValue": {},
        "type": "Object"
      }
    },
    "triggers": {
      "Recurrence": {
        "evaluatedRecurrence": {
          "frequency": "Hour",
          "interval": 3
        },
        "recurrence": {
          "frequency": "Hour",
          "interval": 3
        },
        "type": "Recurrence"
      }
    }
  },
  "parameters": {
    "$connections": {
      "value": {
        "azuremonitorlogs": {
          "connectionId": "/subscriptions/<subcription-id>/resourceGroups/<resource-group>/providers/Microsoft.Web/connections/azuremonitorlogs",
          "connectionName": "azuremonitorlogs",
          "id": "/subscriptions/<subcription-id>/providers/Microsoft.Web/locations/northcentralus/managedApis/azuremonitorlogs"
        },
        "office365": {
          "connectionId": "/subscriptions/<subcription-id>/resourceGroups/<resource-group>/providers/Microsoft.Web/connections/office365",
          "connectionName": "office365",
          "id": "/subscriptions/<subcription-id>/providers/Microsoft.Web/locations/northcentralus/managedApis/office365"
        }
      }
    }
  }
}
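The key change relative to the question's empty() check is the condition; written as a single expression it is:

@equals(length(body('Run_query_and_visualize_results')?['body']), 0)

When it evaluates to true the run is terminated without a notification; otherwise the else branch sends the email with the query results.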
Please find the reference output of the above logic app sample run (screenshot omitted).

How do I do an exact match first with Specification JPA

I'm trying to bump an exact match to first place using Specification and Pageable from Spring Data.
When making a request to this endpoint: http://localhost:8080/configs?search=PATH
this is the response now:
{
  "content": [
    {
      "groupId": "PATHOD",
      "type": "company"
    },
    {
      "groupId": "PATH",
      "type": "company"
    },
    {
      "groupId": "APATH",
      "type": "company"
    }
  ],
  "pageable": {
    ...
  },
  "sort": {
    "sorted": true,
    "unsorted": false,
    "empty": false
  },
  "size": 10,
  "empty": false
}
I want it to be like this, with the exact match bumped to first place:
{
  "content": [
    {
      "groupId": "PATH",
      "type": "company"
    },
    {
      "groupId": "APATH",
      "type": "company"
    },
    {
      "groupId": "PATHOD",
      "type": "company"
    }
  ],
  "pageable": {
    ...
  },
  "sort": {
    "sorted": true,
    "unsorted": false,
    "empty": false
  },
  "size": 10,
  "empty": false
}
This is my code right now, but I don't know how to write the specification:
fun findAllWithFilters(
    exactMatchFirst: Boolean = false,
    pageable: Pageable
): Page<OrderConfig> {
    val spec = and(
        withExactMatchFirst(exactMatchFirst)
    )
    return findAll(spec, pageable)
}
The PATH groupId should be the first one to appear in the results.
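There is no accepted answer above, so here is a minimal sketch of one way to write the specification, assuming an OrderConfig entity with a String groupId property (the function name and the way the search term is passed in are hypothetical): the Specification both filters and injects an ORDER BY CASE that ranks exact matches first.

import org.springframework.data.jpa.domain.Specification

// Filters groupId by `search` (contains, case-insensitive) and orders
// exact matches first, then alphabetically.
fun withExactMatchFirst(search: String): Specification<OrderConfig> =
    Specification<OrderConfig> { root, query, cb ->
        val groupId = root.get<String>("groupId")
        // CASE WHEN groupId = :search THEN 0 ELSE 1 END
        val exactFirst = cb.selectCase<Int>()
            .`when`(cb.equal(groupId, search), 0)
            .otherwise(1)
        query.orderBy(cb.asc(exactFirst), cb.asc(groupId))
        cb.like(cb.upper(groupId), "%${search.uppercase()}%")
    }

Two caveats: if the incoming Pageable carries its own Sort, Spring Data applies it after the Specification and it overrides this ORDER BY, so pass an unsorted Pageable; and because the same Specification is reused for the count query during paging, it can be safer to call query.orderBy only when query.resultType is the entity class.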

Pass an already existing Model to next in Loopback

There is a Project model:
{
  "name": "Project",
  "plural": "Projects",
  "base": "PersistedModel",
  "idInjection": true,
  "options": {
    "validateUpsert": true
  },
  "properties": {
    "title": {
      "type": "string",
      "required": true
    },
    "description": {
      "type": "string"
    },
    "code": {
      "type": "string"
    },
    "startDate": {
      "type": "date",
      "required": true
    },
    "endDate": {
      "type": "date"
    },
    "value": {
      "type": "number"
    },
    "infoEN": {
      "type": "string"
    },
    "infoRU": {
      "type": "string"
    },
    "infoAM": {
      "type": "string"
    },
    "externalLinks": {
      "type": [
        "string"
      ]
    }
  },
  "validations": [],
  "relations": {
    "industry": {
      "type": "belongsTo",
      "model": "Industry",
      "foreignKey": "",
      "options": {
        "nestRemoting": true
      }
    },
    "service": {
      "type": "belongsTo",
      "model": "Service",
      "foreignKey": "",
      "options": {
        "nestRemoting": true
      }
    },
    "tags": {
      "type": "hasAndBelongsToMany",
      "model": "Tag",
      "foreignKey": "",
      "options": {
        "nestRemoting": true
      }
    }
  },
  "acls": [],
  "methods": {}
}
And it has a hasAndBelongsToMany relation to Tag. Here is the Tag model:
{
  "name": "Tag",
  "plural": "Tags",
  "base": "PersistedModel",
  "idInjection": true,
  "options": {
    "validateUpsert": true
  },
  "properties": {
    "name": {
      "type": "string",
      "required": true
    }
  },
  "validations": [],
  "relations": {},
  "acls": [],
  "methods": {}
}
Now when the relation is created, the LoopBack API gives this endpoint:
POST /Projects/{id}/tags
This creates a new tag in the tags collection and adds it to the project.
But what about adding an already existing tag to the project?
So I figured maybe I'd add a before save hook to the Tag, where I'll check whether the tag exists and then pass the existing one to the relation.
Something like this:
tag.js
'use strict';

module.exports = function(Tag) {
  Tag.observe('before save', function(ctx, next) {
    console.log(ctx.instance);
    // The filter needs a `where` clause, and next() must wait for the
    // async find to finish.
    Tag.find({where: {name: ctx.instance.name}}, function(err, result) {
      if (err) return next(err);
      // TODO: pass the existing tag on to the relation instead of a new one
      next();
    });
  });
  // Tag.validatesUniquenessOf('name', {message: 'name is not unique'});
};
@HaykSafaryan this is just a demo to show you how to use Tag inside Project:
var app = require('../../server/server');

module.exports = function(Project) {
  var Tag = app.models.Tag;
  // afterRemote 'create' is just a demo; you can use any method
  Project.afterRemote('create', function(ctx, next) {
    Tag.find({where: {name: ctx.instance.name}}, function(err, result) {
      if (err) return next(err);
      // `result` holds any existing tags with this name
      next();
    });
  });
};
This is just example code to show you how to use update, create, find, upsertWithWhere, etc. on Tag. For validation you have to set up the condition here; it will not pick up the validation you defined in the Tag model.
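As for the original goal of linking an already existing tag: a hasAndBelongsToMany relation also exposes relation methods for this, so another option is to look the tag up (or create it) and attach it through the relation instead of POSTing a new one. A minimal sketch, assuming LoopBack 3 (the linkTag helper is hypothetical and its remote-method registration is omitted):

var app = require('../../server/server');

module.exports = function(Project) {
  // Hypothetical helper: attach a tag by name, reusing an existing Tag row.
  Project.linkTag = function(projectId, tagName, cb) {
    var Tag = app.models.Tag;
    Project.findById(projectId, function(err, project) {
      if (err) return cb(err);
      if (!project) return cb(new Error('Project not found'));
      // findOrCreate returns the existing tag when one with this name
      // exists, otherwise it creates a new one.
      Tag.findOrCreate({where: {name: tagName}}, {name: tagName},
        function(err, tag) {
          if (err) return cb(err);
          // The relation's add() only inserts the join-table row, so the
          // existing tag is linked rather than duplicated.
          project.tags.add(tag, function(err) {
            cb(err, tag);
          });
        });
    });
  };
};

For hasAndBelongsToMany relations LoopBack should also expose a REST route to link an existing tag by id, along the lines of PUT /Projects/{id}/tags/rel/{fk}.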