I'm working with Ecto (with Postgres) for the first time and I have the following two schemas (both somewhat simplified):
defmodule RailroadServer.Database.RailroadSystem do
@moduledoc """
Schema for an entire railroad system.
"""
use Ecto.Schema
import Ecto.Changeset
alias RailroadServer.Database
schema "railroad_systems" do
field :railroad_system_name, :string
has_many :depos, Database.Depo
end
@fields ~w(railroad_system_name)a
def changeset(data, params \\ %{}) do
data
|> cast(params, @fields)
|> validate_required([:railroad_system_name])
|> validate_length(:railroad_system_name, max: 50)
end
end
defmodule RailroadServer.Database.Depo do
@moduledoc """
Schema for a node that stores trains.
"""
use Ecto.Schema
import Ecto.Changeset
alias RailroadServer.Database
schema "depos" do
field :capacity, :integer
field :depo_uuid, :string
field :depo_name, :string
belongs_to :railroad_system, Database.RailroadSystem
end
@fields ~w(capacity depo_uuid depo_name)a
def changeset(data, params \\ %{}) do
data
|> cast(params, @fields)
|> validate_required([:capacity, :depo_uuid, :depo_name])
|> validate_number(:capacity, greater_than: 0)
|> validate_length(:depo_name, max: 50)
|> validate_length(:depo_uuid, max: 50)
|> foreign_key_constraint(:railroad_system_id)
end
end
Based on these migrations:
defmodule RailroadServer.Database.Repo.Migrations.CreateRailroadSystems do
use Ecto.Migration
def change do
create table(:railroad_systems) do
add :railroad_system_name, :varchar, null: false, size: 50
end
create unique_index("railroad_systems", [:railroad_system_name])
end
end
defmodule RailroadServer.Database.Repo.Migrations.CreateDepos do
use Ecto.Migration
def change do
create table(:depos) do
add :railroad_system_id, references("railroad_systems"), null: false
add :depo_uuid, :varchar, size: 50, null: false
add :depo_name, :varchar, size: 50, null: false
add :capacity, :integer, null: false
end
create index("depos", [:railroad_system_id])
create index("depos", [:depo_uuid], unique: true)
create index("depos", [:depo_name], unique: true)
end
end
Which I'm constructing with the following code:
def insert_railway_system(system_name, depos) do
cs = %RailroadSystem{}
|> RailroadSystem.changeset(%{railroad_system_name: system_name})
|> put_assoc(:depos, create_depos(depos))
if cs.valid? do
Repo.insert(cs)
else
{:error, cs}
end
end
_ = """
Uses a list of depo nodes to construct a list of depo changeset.
"""
defp create_depos(depos) do
Enum.map(depos, fn(depo) -> Depo.changeset(%Depo{}, depo) end)
end
However, when I run this function (with data that produces a valid changeset), I get a NULL column error because the foreign key for the railway system in the depo struct doesn't exist. How do I make sure that Ecto passes that foreign key?
The output:
19:06:07.401 [debug] QUERY OK db=0.8ms
begin []
19:06:07.406 [debug] QUERY OK db=0.6ms
INSERT INTO "railroad_systems" ("railroad_system_name") VALUES ($1) RETURNING "id" ["test Can insert railway system"]
19:06:07.409 [debug] QUERY ERROR db=2.7ms
INSERT INTO "depos" ("capacity","depo_name","depo_uuid") VALUES ($1,$2,$3) RETURNING "id" [23, "A depo", "d387a91b-db77-4758-87ed-9951d5c2de8a"]
19:06:07.410 [debug] QUERY OK db=0.1ms
rollback []
1) test Can insert railway system (RailroadServer.DatabaseTest)
apps/railroad_server/test/railroad_server/database_test.exs:9
** (Postgrex.Error) ERROR 23502 (not_null_violation) null value in column "railroad_system_id" violates not-null constraint
table: depos
column: railroad_system_id
Failing row contains (3, null, d387a91b-db77-4758-87ed-9951d5c2de8a, A depo, 23).
stacktrace:
(ecto_sql) lib/ecto/adapters/sql.ex:621: Ecto.Adapters.SQL.raise_sql_call_error/1
(ecto) lib/ecto/repo/schema.ex:649: Ecto.Repo.Schema.apply/4
(ecto) lib/ecto/repo/schema.ex:262: anonymous fn/15 in Ecto.Repo.Schema.do_insert/4
(ecto) lib/ecto/association.ex:927: Ecto.Association.BelongsTo.on_repo_change/5
(ecto) lib/ecto/association.ex:413: Ecto.Association.on_repo_change/7
(elixir) lib/enum.ex:1948: Enum."-reduce/3-lists^foldl/2-0-"/3
(ecto) lib/ecto/association.ex:392: Ecto.Association.on_repo_change/4
(ecto) lib/ecto/repo/schema.ex:811: Ecto.Repo.Schema.process_parents/4
(ecto) lib/ecto/repo/schema.ex:242: anonymous fn/15 in Ecto.Repo.Schema.do_insert/4
(ecto) lib/ecto/association.ex:662: Ecto.Association.Has.on_repo_change/5
(ecto) lib/ecto/association.ex:432: anonymous fn/8 in Ecto.Association.on_repo_change/7
(elixir) lib/enum.ex:1948: Enum."-reduce/3-lists^foldl/2-0-"/3
(ecto) lib/ecto/association.ex:428: Ecto.Association.on_repo_change/7
(elixir) lib/enum.ex:1948: Enum."-reduce/3-lists^foldl/2-0-"/3
(ecto) lib/ecto/association.ex:392: Ecto.Association.on_repo_change/4
(ecto) lib/ecto/repo/schema.ex:837: Ecto.Repo.Schema.process_children/5
(ecto) lib/ecto/repo/schema.ex:914: anonymous fn/3 in Ecto.Repo.Schema.wrap_in_transaction/6
(ecto_sql) lib/ecto/adapters/sql.ex:890: anonymous fn/3 in Ecto.Adapters.SQL.checkout_or_transaction/4
(db_connection) lib/db_connection.ex:1415: DBConnection.run_transaction/4
(railroad_server) lib/railroad_server/database.ex:61: RailroadServer.Database.insert_railway_system/4
Versions:
Elixir - 1.9.0
Ecto - 3.17
Postgrex - 0.14.3
Postgres - 11.4
When I try an insert with identical code as in your insert_railway_system(), I do not get a NULL column error.
My schemas are similar. The only significant difference in my code is in the changeset where I have the constraint:
|> assoc_constraint()
instead of:
|> foreign_key_constraint()
But I changed my code to try the foreign_key_constraint(), leaving the argument the same, which is equivalent to your :railroad_system, and the insert still worked. The insert also worked when I did the equivalent of foreign_key_constraint(:railroad_system_id). In fact, if I use foreign_key_constraint(:hello_world), the insert still works, so as far as I can tell, the second argument to foreign_key_constraint() is ignored, which is puzzling. I even did mix ecto.reset, which deletes the repo/database, recreates the repo/database, then executes the migrations, which creates the tables in the repo/database, and I got the same results.
My "create_depos" migration has the equivalent of the following:
add :railroad_system_id, references(:railroad_systems)
Please post:
The create_depos() function (although for me just using an attributes map instead of a changeset also worked)
The full stack trace of the error.
Your migrations.
Related
I am trying to delete some data from Azure SQL from Databricks using JDBC, it generate error each time. I have very simple query delete from table1 where date>'2022-05-01'.
I searched many documents online but did not find any appropriate solution for this. Please find below code.
jdbcUsername = "userName"
jdbcPassword = "password" #these from Azure Key Vault
jdbcHostname = "host server name"
jdbcPort = "1433"
jdbcDatabase = "db_test"
jdbcUrl = "jdbc:sqlserver://{0}:{1};database={2}".format(jdbcHostname, jdbcPort, jdbcDatabase)
connectionProperties = {
"user" : jdbcUsername,
"password" : jdbcPassword,
"driver" : "com.microsoft.sqlserver.jdbc.SQLServerDriver"
}
pushdown_delete_query = f"(delete from table1 where date>'2022-05-01') table_alias"
print(pushdown_delete_query)
spark.read.jdbc(url=jdbcUrl, table=pushdown_delete_query, properties=connectionProperties)
the query return error com.microsoft.sqlserver.jdbc.SQLServerException: A nested INSERT, UPDATE, DELETE, or MERGE statement must have an OUTPUT clause
I exported a copy of the database from the emulator and moved the test database to an external file. This has been working well for sometime, however, today something changed and this error appeared.
Pre-packaged database has an invalid schema: tableLinkUserToPassword
Expected:
TableInfo
{
name='tableLinkUserToPassword',
columns = {
userId = Column { name='userId', type='INTEGER', affinity='3', notNull=true, primaryKeyPosition=1, defaultValue='null' },
password = Column { name='password', type='TEXT', affinity='2', notNull=true, primaryKeyPosition=0, defaultValue='null' }
},
foreignKeys = [ForeignKey
{
referenceTable='tableUser', onDelete='CASCADE', onUpdate='NO ACTION', columnNames=[userId], referenceColumnNames=[userId]
}
],
indices = [Index { name='index_tableLinkUserToPassword_password', unique=false, columns=[password], orders=[ASC] },
Index { name='index_tableLinkUserToPassword_userId', unique=false, columns=[userId], orders=[ASC]}
]
}
Found:
TableInfo{name='tableLinkUserToPassword', columns={password=Column{name='password', type='TEXT', affinity='2', notNull=false, primaryKeyPosition=0, defaultValue='null'}, userId=Column{name='userId', type='INTEGER', affinity='3', notNull=false, primaryKeyPosition=1, defaultValue='null'}}, foreignKeys=[ForeignKey{referenceTable='tableUser', onDelete='CASCADE', onUpdate='NO ACTION', columnNames=[userId], referenceColumnNames=[userId]}], indices=[Index{name='index_tableLinkUserToPassword_userId', unique=false, columns=[userId], orders=[ASC]}, Index{name='index_tableLinkUserToPassword_password', unique=false, columns=[password], orders=[ASC]}]}
The UserId and the Password columns are switched in order. And notNull=false seems not to match.
I have DB Browser, but how would I change the order of columns?
Entity:
import androidx.room.ColumnInfo
import androidx.room.Entity
import androidx.room.ForeignKey
import androidx.room.PrimaryKey
@Entity(
tableName = "tableLinkUserToPassword",
foreignKeys = [
ForeignKey(
entity = EntityUser::class,
parentColumns = ["userId"],
childColumns = ["userId"],
onDelete = ForeignKey.CASCADE
)
]
)
data class EntityLinkUserToPassword(
@PrimaryKey(autoGenerate = true)
@ColumnInfo(index = true)
val userId: Int,
val password: String,
)
Thanks for any assistance
The order in which the columns appear is not an issue, it's the order in which they are extracted (I think). It is the reported values that matter.
However, what can be seen, as an example, is that the password column EXPECTED (what is extracted from the @Entity annotated class) has the NOT NULL constraint (i.e. it must not be null) as per notNull=true, whilst it found that there is no NOT NULL constraint coded for the password column in the FOUND (i.e. the pre-packaged database) as per notNull=false.
So you either have to change the EntityLinkUserToPassword class to allow null, or change the pre-packaged database to have NOT NULL coded on the password column.
e.g. val password: String?,
You need to check ALL columns for discrepancies between the found and expected.
P.S. a second index on the userId column is a waste and inefficient. The PrimaryKey is an index. So there is no need for the @ColumnInfo annotation.
However, again there is another discrepancy: the second index on the pre-packaged database is on the password column. So you should have the @ColumnInfo annotation moved to apply to the password val/column, e.g. I believe that you want :-
data class EntityLinkUserToPassword(
@PrimaryKey(autoGenerate = true)
val userId: Int,
@ColumnInfo(index = true)
val password: String?,
)
Note the above is based upon observation, the suggested code has not been tested and is not necessarily full comprehensive, so may contain omissions and or errors.
I have DB Browser, but how would I change the order of columns?
You would/could :-
Rename the EntityLinkUserToPassword table e.g. ALTER TABLE EntityLinkUserToPassword RENAME TO renamed_EntityLinkUserToPassword;
Use UPDATE renamed_EntityLinkUserToPassword SET password = 'a suitable default value' WHERE password IS NULL;
This so that you don't get NOT NULL constraint conflicts when copying the data.
Create the new table with the correct schema (see later)
Use INSERT INTO EntityLinkUserToPassword SELECT * FROM renamed_EntityLinkUserToPassword ORDER BY userId ASC;
DROP TABLE IF EXISTS renamed_EntityLinkUserToPassword;
Getting the correct schema
With the classes annotated with @Entity coded as required AND defined in the entities parameter of the @Database annotation, compile the project.
Using the Android View in Android Studio, look at the Java (generated) for a class with the same name as the @Database annotated class but suffixed with _Impl.
Look for the createAllTables method. The SQL for the creation of the tables is hard coded. Copy it and this will be the EXACT SQL.
I am attempting to post to S3 from an Elm application. I have a backend service that generates the signature, policy, etc required to make the post.
I have read through the docs and many of the posts about how to post to S3 and the need to use AWS4-HMAC-SHA256, however, my code is still failing with the error.
Elixir Code to create signature
defp signature(signing_key, string_to_sign) do
hmac_sha256(signing_key, string_to_sign)
|> bytes_to_string
end
defp signing_key(secret_key, date, region) do
hmac_sha256("AWS4#{secret_key}", date)
|> hmac_sha256(region)
|> hmac_sha256(@service)
|> hmac_sha256(@aws_request)
end
def hmac_sha256(key, data) do
:crypto.hmac(:sha256, key, data)
end
def bytes_to_string(bytes) do
Base.encode16(bytes, case: :lower)
end
Elixir code to create the policy
defp policy(key, mimetype, credential, date, expiration_window \\ 60) do
%{
expiration: now_plus(expiration_window),
conditions: [
%{bucket: bucket_name},
["starts-with", "$key", key],
%{acl: "public-read"},
%{success_action_status: "201"},
["starts-with", "$Content-Type", mimetype],
%{"x-amz-credential": credential},
%{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
%{"x-amz-date": date}
]
}
|> Poison.encode!
|> Base.encode64
end
Elixir code to create credential
defp credential(date) do
credential(aws_config[:access_key_id], date)
end
defp credential(key, date) do
key <> "/" <> date <> "/" <> region() <> "/" <> @service <> "/" <> @aws_request
end
Elm Code that makes post
makeMultiPart : UploadSignatureModel -> File -> Http.Body
makeMultiPart uploadSignature file =
Http.multipartBody
[ Http.stringPart "key" uploadSignature.key
, Http.stringPart "acl" uploadSignature.acl
, Http.stringPart "success_action_status" "201"
, Http.stringPart "Content-Type" uploadSignature.content_type
, Http.stringPart "X-Amz-Credential" uploadSignature.credential
, Http.stringPart "X-Amz-Algorithm" "AWS4-HMAC-SHA256"
, Http.stringPart "Policy" uploadSignature.policy
, Http.stringPart "Signature" uploadSignature.signature
, Http.stringPart "AWSAccessKeyId" uploadSignature.aws_access_key_id
, Http.filePart "file" file
]
Obviously, I am missing something but for the life of me can't figure it out.
EDIT:
The error code the user gets is this:
<Error>
<Code>InvalidRequest</Code>
<Message>The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.</Message>
<RequestId>0B1FCA2C05E910B1</RequestId>
<HostId>7ydiqVEEPu22aN+o1BJhAQDQbDXBodChOfHv7986R8ItnhQ5hv0/iETzakTH8gLVljjqKr3lIUg=</HostId>
</Error>
According to http://elixir-lang.org/getting-started/alias-require-and-import.html#aliases
I should be able to have this code working:
defmodule A do
alias A.B, as: C
defmodule B do
defstruct name: ""
end
end
iex(1)> %C{}
But instead i'm having this error:
** (CompileError) iex:1: C.__struct__/0 is undefined, cannot expand struct C
Any idea of what i'm missing here ?
Edit: Module naming is simplified here for the exemple
This works only for the module in which the alias is defined, e.g.:
defmodule A do
alias A.B, as: C
defmodule B do
defstruct name: ""
end
def new do
%C{}
end
end
You could then do:
iex(6)> A.new
%A.B{name: ""}
This will also work in iex if you type the alias there:
iex(7)> alias A.B, as: C
nil
iex(8)> %C{}
%A.B{name: ""}
I have the following model:
class test_data_urls(models.Model):
url = models.CharField(max_length=200, db_index=True)
I want to insert a value into mysql:
cursor = connection.cursor()
url = "hiya"
cursor.execute("insert into my_table(url) values (%s)", (url))
I get an error:
'str' object has no attribute 'items'
This works:
cursor.execute("insert into my_table(url) values ('test')")
but I want to do it with %s. To me, it looks exactly like how I have always done this, so what am I missing?
Try using [ and ] instead of ( and ):
cursor = connection.cursor()
url = "hiya"
cursor.execute("insert into my_table(url) values (%s)", [url])
The reason that (url) does not work is because it's not a tuple, it's a single string. (url,) would be a tuple.