chore: Add migration for new dataset database model field
parent 69777ef0a5
commit cf9edf2663
2 changed files with 68 additions and 2 deletions
@@ -0,0 +1,66 @@
"""Expand dataset database with json connection field

Revision ID: 46a6ce2bd2b2
Revises: 76625596c5c3
Create Date: 2025-11-25 17:56:28.938931

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = "46a6ce2bd2b2"
down_revision: Union[str, None] = "76625596c5c3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def _get_column(inspector, table, name, schema=None):
    for col in inspector.get_columns(table, schema=schema):
        if col["name"] == name:
            return col
    return None


def upgrade() -> None:
    conn = op.get_bind()
    insp = sa.inspect(conn)

    vector_database_connection_info_column = _get_column(
        insp, "dataset_database", "vector_database_connection_info"
    )
    if not vector_database_connection_info_column:
        op.add_column(
            "dataset_database",
            sa.Column(
                "vector_database_connection_info",
                sa.JSON(),
                unique=False,
                nullable=False,
                default={},
            ),
        )

    graph_database_connection_info_column = _get_column(
        insp, "dataset_database", "graph_database_connection_info"
    )
    if not graph_database_connection_info_column:
        op.add_column(
            "dataset_database",
            sa.Column(
                "graph_database_connection_info",
                sa.JSON(),
                unique=False,
                nullable=False,
                default={},
            ),
        )


def downgrade() -> None:
    op.drop_column("dataset_database", "vector_database_connection_info")
    op.drop_column("dataset_database", "graph_database_connection_info")
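For reference, a minimal sketch of applying and verifying this revision programmatically; the `alembic.ini` path and the database URL below are assumptions, not part of this commit (the usual CLI equivalent is `alembic upgrade head`).

    # Minimal sketch: apply the migration and confirm the new JSON columns exist.
    # The config path and database URL are placeholders, not taken from this repo.
    from alembic import command
    from alembic.config import Config
    import sqlalchemy as sa

    cfg = Config("alembic.ini")            # assumed config location
    command.upgrade(cfg, "46a6ce2bd2b2")   # apply up to this revision (or "head")

    engine = sa.create_engine("sqlite:///example.db")  # placeholder URL
    insp = sa.inspect(engine)
    cols = {c["name"] for c in insp.get_columns("dataset_database")}
    assert {"vector_database_connection_info", "graph_database_connection_info"} <= cols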
@@ -27,8 +27,8 @@ class DatasetDatabase(Base):
     # TODO: Instead of specifying and forwarding all these individual fields, consider using a JSON field to store
     # configuration details for different database types. This would make it more flexible to add new database types
     # without changing the database schema.
-    graph_database_connection_info = Column(JSON, unique=False, nullable=True)
-    vector_database_connection_info = Column(JSON, unique=False, nullable=True)
+    graph_database_connection_info = Column(JSON, unique=False, nullable=False, default={})
+    vector_database_connection_info = Column(JSON, unique=False, nullable=False, default={})
 
     created_at = Column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
     updated_at = Column(DateTime(timezone=True), onupdate=lambda: datetime.now(timezone.utc))
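The TODO above points toward storing per-database configuration as JSON rather than as individual columns. A hypothetical sketch of how the new fields might be populated when creating a row; the key names, values, and session setup are illustrative assumptions, not taken from this commit.

    # Hypothetical usage sketch; JSON payload keys are illustrative only.
    from sqlalchemy.orm import Session

    record = DatasetDatabase(
        vector_database_connection_info={
            "provider": "qdrant",            # illustrative
            "url": "http://localhost:6333",
            "collection": "my_dataset",
        },
        graph_database_connection_info={
            "provider": "neo4j",             # illustrative
            "uri": "bolt://localhost:7687",
        },
    )

    with Session(engine) as session:         # `engine` assumed to be defined elsewhere
        session.add(record)
        session.commit()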