Flask-SQLAlchemy: have changes in a parent table show up in child tables

I'm trying to set up my database so that any change that occurs in the parent table also shows up in the child tables. I have been looking at cascade and trying different variations, but nothing is working.
class users(db.Model):
    swipeNum = db.Column(db.BigInteger, nullable=False)  # up to 18 digits safely
    UID = db.Column(db.Integer, primary_key=True, nullable=False)
    directoryID = db.Column(db.String(strlen), nullable=False)
    firstName = db.Column(db.String(strlen), nullable=False)
    lastName = db.Column(db.String(strlen), nullable=False)
    email = db.Column(db.String(strlen), nullable=False)  # maybe nullable?
    userType = db.Column(db.Integer, nullable=False, default=0)
    requests = db.relationship('requests', backref='requestor', lazy='dynamic', cascade='all, delete, delete-orphan')

class requests(db.Model):
    requestID = db.Column(db.Integer, primary_key=True, nullable=False, autoincrement=True)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    reason = db.Column(db.String(strlen), nullable=False)  # might need something more robust
    # foreign key
    user = db.Column(db.Integer, db.ForeignKey('users.UID'))
And this is my test that I'm running. Only the last assert fails.
# Change UID of user 1234 to 4321
user = api.db.session.query(models.users).filter_by(UID=1234).first()
assert user.firstName == "Terrapin"
user.UID = 4321
api.db.session.commit()
api.db.session.close()
user2 = api.db.session.query(models.users).filter_by(UID=4321).first()
assert user2.UID == 4321
# Get lab permissions for 4321 and assert the same
user = api.db.session.query(models.requests).filter_by(user=4321).first()
assert user.user == 4321
Any help would be incredibly appreciated!
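For context, the cascade option on a relationship only controls how the ORM flushes and deletes related objects; it does not rewrite foreign key values when a primary key changes. What propagates the new UID into requests.user is a database-level ON UPDATE CASCADE on the foreign key itself. A minimal sketch (not from the original post, column names assumed from the models above):
class requests(db.Model):
    # ... other columns as above ...
    # ON UPDATE CASCADE is enforced by the database itself, so changing
    # users.UID also rewrites requests.user (the backend must actually
    # enforce foreign keys, e.g. InnoDB on MySQL or PRAGMA foreign_keys=ON
    # on SQLite).
    user = db.Column(db.Integer, db.ForeignKey('users.UID', onupdate='CASCADE'))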

Related

How to perform a data migration with Alembic and two versions of my Table?

I'm trying to refactor a database model: separate one column out of a table into a new one. I'd like to do this using the existing SQLAlchemy Core models and Alembic. I'd also like to use a server-side INSERT ... FROM SELECT ...-style query to migrate the data (docs). By avoiding copying gazillions of rows into Python, I hope to get maximum scalability, maximum performance, and minimum downtime.
My problem is programmatically using SQLAlchemy with two versions of the same table name in a single MetaData context. Should I resort to using textual SQL instead? 😕
schema.py before:
class User(Base):
    __tablename__ = "users"
    id = Column(BigInteger, primary_key=True, autoincrement=False, nullable=False)
    [...]
    profile_picture_url = Column(String, nullable=True)
schema.py after:
class User(Base):
    __tablename__ = "users"
    id = Column(BigInteger, primary_key=True, autoincrement=False, nullable=False)
    [...]

class UserProfileExtras(Base):
    __tablename__ = "user_profile_extras"
    user_id = Column(BigInteger, ForeignKey("users.id"), index=True, nullable=False)
    profile_picture_url = Column(String, nullable=False)
So here's my attempt to create an Alembic upgrade script:
# Import the new/current-in-code models.
from ... import User, UserProfileExtras

# Define the previous User model in order to operate on the current/old schema.
class UserBeforeUpgrade(Base):
    __tablename__ = "users"
    id = Column(BigInteger, primary_key=True, autoincrement=False, nullable=False)
    [...]
    profile_picture_url = Column(String, nullable=True)

table_before_upgrade: Table = UserBeforeUpgrade.__table__
new_target_table = UserProfileExtras.__table__
[...]

def upgrade() -> None:
    op.create_table(
        "user_profile_extras",
        sa.Column("user_id", sa.BigInteger(), autoincrement=False, nullable=False),
        sa.Column("profile_picture_url", sa.VARCHAR(), nullable=False),
        [...]
    )
    from_user_table = (select([table_before_upgrade.c.id, table_before_upgrade.c.profile_picture_url])
                       .where(table_before_upgrade.c.profile_picture_url != None))
    insert_from = (
        new_target_table.insert().from_select(
            [new_target_table.c.user_id, new_target_table.c.profile_picture_url],
            from_user_table)
    )
    op.execute(insert_from)
    [...]
[...]
Error:
sqlalchemy.exc.InvalidRequestError: Table 'users' is already defined for this MetaData instance.
Specify 'extend_existing=True' to redefine options and columns on an existing Table object.

ORM query many-to-many-to-one, Flask-SQLAlchemy

I have three Flask-SQLAlchemy models. Book - unique entries by the admin:
class Book(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    book_name = db.Column(db.String(20), unique=True, nullable=False)

    def __repr__(self):
        return self.book_name
Bookscomp - entries by users, related to above:
class Bookscomp(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    book = db.Column(db.Integer, db.ForeignKey('book.id'))
Company - user, related to above:
class Company(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(100), nullable=False)
    books = db.relationship('Bookscomp', secondary=companybook, lazy='dynamic',
                            backref=db.backref('company'))

companybook = db.Table('companybook',
    db.Column('companyid', db.Integer, db.ForeignKey('company.id'), primary_key=True),
    db.Column('bookid', db.Integer, db.ForeignKey('bookscomp.id'), primary_key=True),
)
Problem: I am trying to get book_name from the Book model, through Company + Bookscomp.
So a company has many books, and each book has a reference to the general book info.
I tried it like this:
company = Company.query.filter_by(id=comp.id).first()
books = company.books.all()
for item in books:
    print(item.book.book_name)
    # AttributeError: 'int' object has no attribute 'book_name'
    print(item.book)
    # Gives the book id from the Book model, but I need the name
Why can't I get book_name directly in the above code snippet? And what would be the best way to achieve this?
You haven't defined a relationship between Book and Bookscomp, so when you ask for item.book it returns the value of the book = db.Column(db.Integer, ...) column. Maybe tweak your Bookscomp model to be something like:
class Bookscomp(db.Model):
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    book_id = db.Column(db.Integer, db.ForeignKey('book.id'))
    book = db.relationship('Book')
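With a relationship like that in place, the loop from the question should work as intended; a quick sketch, assuming the models above:
company = Company.query.filter_by(id=comp.id).first()
for item in company.books.all():
    # item.book is now a Book instance rather than the raw integer foreign key,
    # so the related book's name is reachable directly.
    print(item.book.book_name)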

Alembic sqlalchemy.exc.NoReferencedColumnError (using Flask-SQLAlchemy and Flask-Migrate)

Alembic keeps giving me this error when I try to migrate my schema even though the initial migration went without a hitch.
sqlalchemy.exc.NoReferencedColumnError: Could not initialize target column for ForeignKey 'dataset.datasetid' on table 'analysis': table 'dataset' has no column named 'datasetid'
Here is part of my models.py:
class Dataset(db.Model):
    DatasetID = db.Column(db.Integer, primary_key=True)
    SampleID = db.Column(db.String(50), db.ForeignKey('sample.SampleID', onupdate="cascade", ondelete="restrict"), nullable=False)
    UploadDate = db.Column(db.Date, nullable=False)
    UploadID = db.Column(db.Integer, db.ForeignKey('uploaders.UploadID', onupdate="cascade", ondelete="restrict"), nullable=False)
    UploadStatus = db.Column(db.String(45), nullable=False)
    HPFPath = db.Column(db.String(500))
    DatasetType = db.Column(db.String(45), nullable=False)
    SolvedStatus = db.Column(db.String(30), nullable=False)
    InputFile = db.Column(db.Text)
    RunID = db.Column(db.String(45))
    Notes = db.Column(db.Text)
    analyses = db.relationship('Analysis', backref='dataset', lazy='dynamic')
    data2Cohorts = db.relationship('Dataset2Cohort', backref='dataset', lazy='dynamic')

class Dataset2Cohort(db.Model):
    __tablename__ = 'dataset2Cohort'
    DatasetID = db.Column(db.Integer, db.ForeignKey('dataset.DatasetID', onupdate="cascade", ondelete="cascade"), nullable=False, primary_key=True)
    CohortID = db.Column(db.Integer, db.ForeignKey('cohort.CohortID', onupdate="cascade", ondelete="restrict"), nullable=False, primary_key=True)

class Analysis(db.Model):
    AnalysisID = db.Column(db.String(100), primary_key=True)
    DatasetID = db.Column(db.Integer, db.ForeignKey('dataset.DatasetID', onupdate="cascade", ondelete="cascade"), nullable=False)
    PipelineVersion = db.Column(db.String(30))
    ResultsDirectory = db.Column(db.Text)
    ResultsBAM = db.Column(db.Text)
    AssignedTo = db.Column(db.String(100), nullable=True)
    analysisStatuses = db.relationship('AnalysisStatus', backref='analysis', lazy='dynamic')
Does anyone know why I keep getting that error even though I have the DatasetID column in the Dataset table?
Thank you,
Teja.
Found a solution.
This turned out to be an issue with how MySQL 8.x refers to column names in foreign key declarations: MySQL 8.x always lowercases the referenced column name in foreign key statements, which causes an incompatibility with SQLAlchemy. The issue is discussed here:
https://github.com/sqlalchemy/sqlalchemy/issues/4344
The solution is simply to upgrade SQLAlchemy to the latest version (>= 1.2.x).
Teja.

SQLAlchemy: find a user with a single query (a join is probably necessary)

I have three models which have relationships.
Firstly, the participant model.
class Participant(UserMixin, db.Model):
    __tablename__ = 'participants'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), index=True)
    team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
    # Relationships
    team = db.relationship("Team", back_populates="members")
Secondly, the event model.
class Event(db.Model):
    __tablename__ = 'events'
    id = db.Column(db.Integer, primary_key=True)
Thirdly, the team model.
class Team(db.Model):
    __tablename__ = 'teams'
    id = db.Column(db.Integer, primary_key=True)
    event_id = db.Column(db.Integer, db.ForeignKey('events.id'))
    # Relationships
    members = db.relationship('Participant', back_populates="team")
Several participants are allowed to have the same email address if their team is not connected to the same event.
I am looking for a query that checks whether there is a participant with a given email address who belongs to a team that is connected to a given event. I know the event id and the email address in advance.
Pseudo code
def check(EVENTID, EMAIL):
    if db.session.query(Team, Event, Participant). \
            filter(Team.event_id == EVENTID). \
            filter(Participant.team_id == Team.id). \
            filter(Participant.email == EMAIL).first():
        return True
I think it can be done with a single query using joins, but I couldn't figure it out. Please help!
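Not part of the original post, but a single-query sketch with explicit joins, assuming the models above; joining Event itself isn't needed because Team already carries event_id:
def check(event_id, email):
    row = (
        db.session.query(Participant.id)
        .join(Team, Participant.team_id == Team.id)
        .filter(Team.event_id == event_id, Participant.email == email)
        .first()
    )
    # True if a participant with this email is already on a team for this event.
    return row is not None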

Nested SQLAlchemy filter with parent and child

With the following schema:
class User(Base):
    id = Column(Integer, primary_key=True)
    name = Column(String)

class Photo(Base):
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey(User.id), nullable=False)
    user = relationship(User)

class Tag(Base):
    id = Column(Integer, primary_key=True)
    tag_name = Column(String)
    tag_version = Column(Integer)
    photo_id = Column(Integer, ForeignKey(Photo.id), nullable=False)
    photo = relationship(Photo)
How do I create an SQLAlchemy query that gets all the photos of a specific user that don't have a specific tag and version?
As in "all the photos of the user with id 1234 that don't have a 'cat' tag of version 2 on them".
Also interesting would be "all the users who have at least one photo without a specific tag".
I'm using PostgreSQL, by the way.
Here is a complete example that sets up relationships, creates some sample data, then performs your two queries.
Setup:
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, not_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship

engine = create_engine('sqlite:///', echo=True)
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base(bind=engine)

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)

class Photo(Base):
    __tablename__ = 'photo'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    user_id = Column(Integer, ForeignKey(User.id), nullable=False)
    user = relationship(User, backref='photos')

class Tag(Base):
    __tablename__ = 'tag'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    photo_id = Column(Integer, ForeignKey(Photo.id), nullable=False)
    photo = relationship(Photo, backref='tags')

Base.metadata.create_all()

session.add(User(name='davidism', photos=[
    Photo(name='sun', tags=[Tag(name='bright'), Tag(name='day')]),
    Photo(name='moon', tags=[Tag(name='bright'), Tag(name='night')])
]))
session.add(User(name='eran', photos=[
    Photo(name='party', tags=[Tag(name='people'), Tag(name='night')]),
    Photo(name='cat')
]))
session.commit()
Query all photos with no tags at all:
no_tags = session.query(Photo).outerjoin(Photo.tags).filter(not_(Photo.tags.any())).all()
print('no tags: ', len(no_tags))
Query all photos without the tag 'night':
not_night = session.query(Photo).outerjoin(Photo.tags).filter(not_(Photo.tags.any(Tag.name == 'night'))).all()
print('not night: ', len(not_night))
Assuming the existence of the backrefs Tag.photo = relationship(Photo, backref='tags') and
Photo.user = relationship(User, backref="photos"), both can be done using the any() construct. This might not generate the most optimal SQL SELECT statement, but it is very clean SQLAlchemy.
Part-1: "all the photos of the user with id "1234" that don't have a "cat" of version "2" tagged in them"
from sqlalchemy import and_

def get_user_photos_without_tag(user_id, tag_name, tag_version):
    qry = (session.query(Photo)
           .filter(~Photo.tags.any(and_(
               Tag.tag_name == tag_name,
               Tag.tag_version == tag_version))
           )
           .filter(Photo.user_id == user_id)
           )
    return qry.all()
photos = get_user_photos_without_tag(1234, 'cat', 2)
Part-2: "all the users who have at least one photo without a specific tag"
def get_user_with_photos_without_tag(tag_name, tag_version):
    qry = (session.query(User)
           .filter(User.photos.any(
               ~Photo.tags.any(and_(
                   Tag.tag_name == tag_name,
                   Tag.tag_version == tag_version))
           ))
           )
    return qry.all()
res = get_user_with_photos_without_tag('cat', 2)