Alembic revision autogeneration creates temp tables #1470
-
I am trying to autogenerate a migration against an Azure SQL Edge database. When the database is initialized the tempdb has no tables, yet the autogenerated revision picks up temp tables.
Dockerfile
# Use the Azure SQL Edge image
FROM mcr.microsoft.com/azure-sql-edge:latest
# Set environment variables
ENV ACCEPT_EULA=1 \
    MSSQL_SA_PASSWORD=password123 \
    MSSQL_PID=Developer
# Expose port 1433 (internally within the container)
EXPOSE 1433
alembic.ini, env.py
# alembic.ini
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
file_template = %%(year)d-%%(month).2d-%%(day).2d-%%(hour).2d-%%(minute).2d_%%(rev)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
# prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library.
# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
# version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
# sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = DEBUG
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = DEBUG
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

# env.py
import asyncio
import pathlib
import sys
from logging.config import fileConfig
from alembic import context
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateTable
from sqlmodel import Field, SQLModel, text
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
class Hero(SQLModel, table=True):
    id: int | None = Field(default=None, primary_key=True)
    name: str = Field(index=True)
    secret_name: str
    age: int | None = Field(default=None, index=True)


target_metadata = SQLModel.metadata

db_url = str(
    "mssql+aioodbc://sa:password123@127.0.0.1:1433/tempdb?driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes&Encrypt=yes"
)


def include_object(object, name, type_, reflected, compare_to):
    """Conditionally include objects in the migration."""
    # Ignore celery tables in `celery` schema
    if type_ == "table" and object.schema == "celery":
        return False
    return True


def run_migrations_offline():
    """
    Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    context.configure(
        url=db_url,
        target_metadata=target_metadata,
        literal_binds=True,
        compare_type=True,
        include_object=include_object,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()


def do_run_migrations(connection):
    """Run migrations in 'online' mode."""
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_object=include_object,
        version_table_schema="migration",
    )
    with context.begin_transaction():
        context.run_migrations()


async def run_migrations_online():
    """
    Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connectable = create_async_engine(db_url, echo=False, future=True)
    async with connectable.connect() as connection:
        await connection.execute(
            text(
                """
                IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = 'migration')
                BEGIN
                    EXEC('CREATE SCHEMA migration')
                END;
                """
            )
        )
        await connection.run_sync(do_run_migrations)


if context.is_offline_mode():
    run_migrations_offline()
else:
    asyncio.run(run_migrations_online())

alembic revision --autogenerate logs
Steps
Replies: 1 comment 1 reply
-
these names:
are not expected to be present in ischema.tables, so this would be some unknown behavior of the specific MS database you are using. the # sign indicates a temp table. you can instruct alembic to skip these names using include_name, which hopefully will omit these names from the process that looks for columns.

def include_name(name, type_, parent_names):
    if type_ == "table" and "#" in name:
        return False
    return True


context.configure(
    # ...
    include_name=include_name,
)
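As a follow-up illustration, here is a minimal sketch of how that include_name hook could be combined with the include_object filter already present in the env.py above. It assumes the rest of that env.py (the context import, target_metadata, include_object) stays as shown, and it carries over the version_table_schema value from the original post; it is an untested sketch, not a confirmed fix.

def include_name(name, type_, parent_names):
    # Skip MSSQL temporary tables, whose names contain a "#".
    if type_ == "table" and "#" in name:
        return False
    return True


def do_run_migrations(connection):
    """Run migrations in 'online' mode, filtering out reflected temp tables."""
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_object=include_object,  # existing schema-level filter for "celery"
        include_name=include_name,  # name-level filter applied during reflection
        version_table_schema="migration",
    )
    with context.begin_transaction():
        context.run_migrations()

With both hooks configured, autogenerate should skip the #-prefixed names before it tries to reflect their columns, while the celery schema continues to be excluded as before.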