import reflex as rx
config = rx.Config(
app_name="Calculadora",
)
|
import reflex as rx
class State(rx.State):
expression: str = ""
result: str = ""
def add_to_expression(self, value: str):
self.expression += value
def clear(self):
self.expression = ""
self.result = ""
def calculate(self):
try:
self.result = str(eval(self.expression))
except Exception:
self.result = "Error"
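# Note: eval() executes arbitrary Python, so a crafted expression could run
# code on the server. Below is a minimal sketch of a safer arithmetic-only
# evaluator using the ast module (an assumption, not part of the original
# app; the State above keeps using eval()):
import ast
import operator

_ALLOWED_BINOPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
}

def safe_eval(expression: str) -> float:
    """Evaluate a purely arithmetic expression by walking its AST."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _ALLOWED_BINOPS:
            return _ALLOWED_BINOPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
            return -_eval(node.operand)
        raise ValueError("unsupported expression")
    return _eval(ast.parse(expression, mode="eval"))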
def calculator() -> rx.Component:
buttons = [
"7", "8", "9", "/",
"4", "5", "6", "*",
"1", "2", "3", "-",
"0", ".", "=", "+"
]
return rx.center(
rx.vstack(
rx.text("Calculadora", font_size="2xl", color="black"),
rx.box(rx.text(State.expression, color="black"), height="40px"),
rx.box(rx.text(State.result, color="green"), height="40px"),
rx.grid(
*[
rx.button(
b,
on_click=(
State.calculate if b == "="
else lambda b=b: State.add_to_expression(b)
),
width="60px",
height="60px",
font_size="xl"
)
for b in buttons
],
columns="4",
gap=2,
),
rx.button("C", on_click=State.clear, color_scheme="red"),
spacing="4"
),
padding="20px",
bg="white",
)
app = rx.App()
app.add_page(calculator, title="Calculadora", route="/")
|
import reflex as rx
config = rx.Config(
app_name="pyama_web",
)
|
import os
import numpy as np
import numba as nb
import scipy
import h5py
import skimage as sk
import cv2
import pandas as pd
import re
import math
# import matplotlib.pyplot as plt
import pathlib
import scipy.ndimage as smg
from nd2reader import ND2Reader
STRUCT3 = np.ones((3,3), dtype=np.bool_)
STRUCT5 = np.ones((5,5), dtype=np.bool_)
STRUCT5[[0,0,-1,-1], [0,-1,0,-1]] = False
@nb.njit
def window_std(img: np.ndarray) -> float:
"""
Calculate unnormed variance of 'img'
Refer to https://en.wikipedia.org/wiki/Variance#Unbiased_sample_variance
Refer to Pyama https://github.com/SoftmatterLMU-RaedlerGroup/pyama/tree/master
Parameters:
img (np.ndarray): Input image
Returns:
float: Unnormed variance of the image
"""
return np.sum((img - np.mean(img))**2)
@nb.njit
def generic_filter(img: np.ndarray, fun: callable, size: int = 3, reflect: bool = False) -> np.ndarray:
"""
Apply filter to image.
Parameters:
img (np.ndarray): The image to be filtered
        fun (callable): The filter function to be applied; must accept a subimage of 'img' as its only argument and return a scalar
size (int): The size (side length) of the kernel. Must be an odd integer
        reflect (bool): Border mode switch: True for 'reflect', False for 'mirror'; both modes pad the borders of 'img' with copies of nearby rows and columns
Returns:
np.ndarray: Filtered image as a np.float64 array with same shape as 'img'
Raises:
ValueError: If 'size' is not an odd integer
"""
if size % 2 != 1:
raise ValueError("'size' must be an odd integer")
height, width = img.shape
s2 = size // 2
# Set up temporary image for correct border handling
img_temp = np.empty((height+2*s2, width+2*s2), dtype=np.float64)
img_temp[s2:-s2, s2:-s2] = img
if reflect:
img_temp[:s2, s2:-s2] = img[s2-1::-1, :]
img_temp[-s2:, s2:-s2] = img[:-s2-1:-1, :]
img_temp[:, :s2] = img_temp[:, 2*s2-1:s2-1:-1]
img_temp[:, -s2:] = img_temp[:, -s2-1:-2*s2-1:-1]
else:
img_temp[:s2, s2:-s2] = img[s2:0:-1, :]
img_temp[-s2:, s2:-s2] = img[-2:-s2-2:-1, :]
img_temp[:, :s2] = img_temp[:, 2*s2:s2:-1]
img_temp[:, -s2:] = img_temp[:, -s2-2:-2*s2-2:-1]
# Create and populate result image
filtered_img = np.empty_like(img, dtype=np.float64)
for y in range(height):
for x in range(width):
filtered_img[y, x] = fun(img_temp[y:y+2*s2+1, x:x+2*s2+1])
return filtered_img
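# Minimal usage sketch (illustrative, synthetic data): the local unnormed
# variance of every 3x3 neighbourhood, which is exactly how binarize_frame
# below combines these two functions. Wrapped in a function so importing
# this module stays side-effect free.
def _demo_window_std_filter() -> np.ndarray:
    rng = np.random.default_rng(0)
    img = rng.random((16, 16))
    # mirror border handling (reflect=False), kernel side length 3
    return generic_filter(img, window_std, size=3)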
def binarize_frame(img: np.ndarray, mask_size: int = 3) -> np.ndarray:
"""
Coarse segmentation of phase-contrast image frame
Refer to OpenCV tutorials for more information on binarization/thresholding techniques.
Parameters:
img (np.ndarray): The image to be binarized
        mask_size (int): The side length of the filter kernel used in the binarization process
Returns:
np.ndarray: Binarized image of frame
"""
# Get logarithmic standard deviation at each pixel
std_log = generic_filter(img, window_std, size=mask_size)
std_log[std_log>0] = (np.log(std_log[std_log>0]) - np.log(mask_size**2 - 1)) / 2
    # Estimate the width of the histogram's main mode
counts, edges = np.histogram(std_log, bins=200)
bins = (edges[:-1] + edges[1:]) / 2
hist_max = bins[np.argmax(counts)]
sigma = np.std(std_log[std_log <= hist_max])
# Apply histogram-based threshold
img_bin = std_log >= hist_max + 3 * sigma
# Remove noise
img_bin = smg.binary_dilation(img_bin, structure=STRUCT3)
img_bin = smg.binary_fill_holes(img_bin)
img_bin &= smg.binary_opening(img_bin, iterations=2, structure=STRUCT5)
img_bin = smg.binary_erosion(img_bin, border_value=1)
return img_bin
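# Minimal sketch (synthetic frame; the noise levels are assumptions for
# illustration): the binarization picks up the textured patch against the
# flat background.
def _demo_binarize_frame() -> int:
    rng = np.random.default_rng(0)
    img = rng.normal(loc=100.0, scale=2.0, size=(64, 64))
    img[20:40, 20:40] += rng.normal(scale=40.0, size=(20, 20))  # textured "cell"
    return int(binarize_frame(img, mask_size=3).sum())  # foreground pixel count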
def csv_output(out_dir: str, pos: list, mins: float, use_square_rois: bool = True) -> None:
"""
Generate CSV output for tracked positions
Parameters:
out_dir (str): Output directory path
pos (list): List of positions to process
mins (float): Minutes per frame
use_square_rois (bool): Whether to use square ROIs
Returns:
None
"""
folders = get_tracked_folders(out_dir,pos)
for folder in folders:
csv_output_position(folder[0],folder[1],mins,use_square_rois)
def csv_output_position(pos: int, pos_path: pathlib.Path, mins: float, use_square_rois: bool) -> None:
"""
Generate CSV output for a single position
Parameters:
pos (int): Position number
pos_path (pathlib.Path): Path to position directory
mins (float): Minutes per frame
use_square_rois (bool): Whether to use square ROIs
Returns:
None
"""
tracks_path = pos_path.joinpath('tracks.csv')
tracks = pd.read_csv(tracks_path.absolute(),index_col=0)
data_path = pos_path.joinpath('data.h5')
with h5py.File(data_path.absolute(), "r") as data:
frames = range(data.attrs['frame_min'],data.attrs['frame_max']+1)
fl_channel_names = data.attrs['fl_channel_names']
excel_path = pos_path.joinpath('output.xlsx')
particles = [int(p) for p in tracks['particle'].unique()]
particles.sort()
print("Starting Data Export for position:",str(pos))
with pd.ExcelWriter(excel_path.absolute()) as writer:
if use_square_rois == True and 'square_area' in tracks:
area = csv_get_table(particles,tracks,frames,mins,'square_area')
else:
area = csv_get_table(particles,tracks,frames,mins,'area')
area.to_excel(writer, sheet_name='Area', index=False)
for i in range(len(fl_channel_names)):
col_name = 'brightness_' + str(i)
if use_square_rois == True and 'square_' + col_name in tracks:
brightness = csv_get_table(particles,tracks,frames,mins,'square_' + col_name)
else:
brightness = csv_get_table(particles,tracks,frames,mins,col_name)
brightness.to_excel(writer, sheet_name=fl_channel_names[i], index=False)
table_to_image(pos_path,particles,brightness,fl_channel_names[i])
print('Done')
def table_to_image(pos_path: pathlib.Path, particles: list, table: pd.DataFrame, name: str) -> None:
"""
    Convert table data to an image and save it.
    This post-processing step plots the fluorescence tracks from the table
    and saves them as a PNG.
Parameters:
pos_path (pathlib.Path): Path to position directory
particles (list): List of particle IDs
table (pd.DataFrame): Data table
name (str): Name for the output file
"""
# Import matplotlib and set backend at the start of the function
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Create figure without displaying it
fig = plt.figure()
for p in particles:
plt.plot(table['time'].values, table[str(p)].values, color='gray', alpha=0.5)
plt.xlabel('Time (Frame)')
plt.ylabel('Brightness (Pixelsum)')
plt.title(name)
plt.tight_layout()
# Save figure and close it
fig.savefig(pos_path.joinpath(name + '.png').absolute())
plt.close(fig)
def csv_get_table(particles: list, tracks: pd.DataFrame, frames: list, mins: float, col: str) -> pd.DataFrame:
"""
Post-processing step that converts from TrackPy to Pyama format.
Extract data from tracks and create a table.
Parameters:
particles (list): List of particle IDs
tracks (pd.DataFrame): Tracking data
frames (list): List of frame numbers
mins (float): Minutes per frame
col (str): Column name to extract
Returns:
pd.DataFrame: Extracted data table
"""
keys = []
keys.append('time')
for p in particles:
keys.append(str(p))
data = {}
for key in keys:
data[key] = []
print('Fetching Data:', col)
for f in frames:
#print('Frame',f)
data['time'].append(f * mins / 60)
for p in particles:
t = tracks[(tracks['particle'] == p) & (tracks['frame'] == f)]
if len(t) > 0:
data[str(p)].append(t.iloc[0][col])
else:
# change this behaviour if memory during tracking is used
data[str(p)].append(0)
return pd.DataFrame(data)
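# The returned table is wide-format: one 'time' column (in hours, frame *
# mins / 60) plus one column per particle ID. Illustrative layout for
# particles 0 and 1 at 15 minutes per frame (values made up):
#
#   time    0       1
#   0.25    812.0   640.0
#   0.50    830.0   655.0
#
# Frames where a particle was not found are filled with 0.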
def square_roi(out_dir: str, pos: list, micron_size: float) -> None:
"""
Post-processing step where the micron_size defines the length of the squares.
Apply square ROI to tracked positions
Parameters:
out_dir (str): Output directory path
pos (list): List of positions to process
micron_size (float): Size of ROI in microns
Returns:
None
"""
folders = get_tracked_folders(out_dir,pos)
print(folders)
for folder in folders:
square_roi_position(folder[0],folder[1],micron_size)
def square_roi_position(pos: int, pos_path: pathlib.Path, micron_size: float) -> None:
"""
Post-processing step where the micron_size defines the length of the squares.
Apply square ROI to a single position.
Parameters:
pos (int): Position number
pos_path (pathlib.Path): Path to position directory
micron_size (float): Size of ROI in microns
Returns:
None
"""
tracks_path = pos_path.joinpath('tracks.csv')
tracks = pd.read_csv(tracks_path.absolute(),index_col=0)
data_path = pos_path.joinpath('data.h5')
data = h5py.File(data_path.absolute(), "r")
size = math.ceil(micron_size / data.attrs['pixel_microns'])
width,height = data.attrs['width'],data.attrs['height']
print("Starting Square ROIs for position:",str(pos))
for frame in sorted(tracks['frame'].unique()):
frame_data_index = frame-data.attrs['frame_min']
print("Frame",str(int(frame)))
# t = tracks[(tracks['frame'] == frame) & (tracks['enabled'] == True)]
true_values = [1, '1', '1.0', 1.0, True, 'True', 'true', 'TRUE']
# Convert true_values to lowercase strings
true_values_lower = [str(v).lower() for v in true_values]
# Create the enabled condition:
# 1. Convert the enabled column to string
# 2. Convert all values to lowercase
# 3. Check if they're in our true_values list
enabled_condition = tracks['enabled'].astype(str).str.lower().isin(true_values_lower)
# Apply both conditions to filter the dataframe
t = tracks[
(tracks['frame'] == frame) & (enabled_condition)]
for index, record in t.iterrows():
x = int((record['bbox_x1'] + record['bbox_x2']) // 2)
y = int((record['bbox_y1'] + record['bbox_y2']) // 2)
x1 = max(0,x - size)
y1 = max(0,y - size)
x2 = min(height-1,x + size)
y2 = min(width-1,y + size)
tracks.loc[(tracks['frame'] == frame) & (tracks['particle'] == record['particle']), 'square_area'] = (x2-x1) * (y2-y1)
for i in range(len(data.attrs['fl_channels'])):
im_slice = data['fluorescence'][int(frame_data_index),i][x1:x2,y1:y2]
tracks.loc[(tracks['frame'] == frame) & (tracks['particle'] == record['particle']), 'square_brightness_' + str(i)] = im_slice.sum()
data.close()
tracks.to_csv(tracks_path.absolute())
print("Done")
# to be deprecated since the function above is the only one being used.
def square_roi_position_old(nd2_path: str, out_dir: str, pos: int, micron_size: float) -> None:
"""
Old version of square ROI application to a single position
Parameters:
nd2_path (str): Path to ND2 file
out_dir (str): Output directory path
pos (int): Position number
micron_size (float): Size of ROI in microns
Returns:
None
"""
if not pathlib.Path(nd2_path).is_file():
print("Invalid ND2 Path")
return
nd2 = ND2Reader(nd2_path)
pos_dir = position_path(out_dir,pos)
if pos_dir is None:
print("Could not find directory")
return
tracks_path = pos_dir.joinpath('tracks.csv')
if not tracks_path.is_file():
print("Could not find features.csv")
return
tracks = pd.read_csv(tracks_path.absolute())
# pixel_microns: the amount of microns per pixel
size = math.ceil(micron_size / nd2.metadata['pixel_microns'])
data_path = pos_dir.joinpath('data.h5')
if not data_path.is_file():
print("Could not find data.h5")
return
tracks['square_mass'] = 0
data = h5py.File(data_path.absolute(), "r")
width,height = nd2.metadata['width'],nd2.metadata['height']
print("Starting Square ROIs for position " + str(pos))
for frame in sorted(tracks['frame'].unique()):
frame_data_index = frame-data.attrs['frame_min']
print("Frame " + str(int(frame)))
t = tracks[(tracks['frame'] == frame) & (tracks['enabled'] == True)]
#t = tracks[(tracks['frame'] == frame)]
fl_image = data['bg_corr'][int(frame_data_index)]
for index, record in t.iterrows():
x = int((record['bbox_x1'] + record['bbox_x2']) // 2)
y = int((record['bbox_y1'] + record['bbox_y2']) // 2)
x1 = max(0,x - size)
y1 = max(0,y - size)
x2 = min(height-1,x + size)
y2 = min(width-1,y + size)
im_slice = fl_image[x1:x2,y1:y2]
tracks.loc[(tracks['frame'] == frame) & (tracks['particle'] == record['particle']), 'square_mass'] = im_slice.sum()
data.close()
tracks.to_csv(tracks_path.absolute())
print("Done")
def get_position_folders(out_dir: str) -> list:
"""
Get a list of position folders from the output directory
Parameters:
out_dir (str): Output directory path
Returns:
list: List of tuples containing position number and path
"""
folders = []
for path in pathlib.Path(out_dir).iterdir():
if not path.is_dir():
continue
if not re.search('^XY0*\\d+$', path.name):
continue
number_str = path.name[2:].lstrip('0')
pos = int(number_str) if number_str else 0
folders.append((pos,path))
return folders
def get_tracking_folders(out_dir: str, pos: list) -> list:
"""
Get a list of tracking folders for specified positions
Parameters:
out_dir (str): Output directory path
pos (list): List of position numbers
Returns:
list: List of tuples containing position number and path
"""
pos = list(set(pos))
pos_folders = get_position_folders(out_dir)
if len(pos) > 0:
pos_folders = [p for p in pos_folders if p[0] in pos]
folders = []
for folder in pos_folders:
features_path = folder[1].joinpath('features.csv')
if not features_path.is_file():
print("Position " + str(folder[0]) + ":", "Could not find features.csv")
continue
data_path = folder[1].joinpath('data.h5')
if not data_path.is_file():
print("Position " + str(folder[0]) + ":", "Could not find data.h5")
continue
folders.append(folder)
return folders
def get_tracked_folders(out_dir: str, pos: list) -> list:
"""
Get a list of tracked folders for specified positions
Parameters:
out_dir (str): Output directory path
pos (list): List of position numbers
Returns:
list: List of tuples containing position number and path
"""
pos = list(set(pos))
pos_folders = get_position_folders(out_dir)
if len(pos) > 0:
pos_folders = [p for p in pos_folders if p[0] in pos]
folders = []
for folder in pos_folders:
features_path = folder[1].joinpath('features.csv')
if not features_path.is_file():
print("Position " + str(folder[0]) + ":", "Could not find features.csv")
continue
data_path = folder[1].joinpath('data.h5')
if not data_path.is_file():
print("Position " + str(folder[0]) + ":", "Could not find data.h5")
continue
tracks_path = folder[1].joinpath('tracks.csv')
if not tracks_path.is_file():
print("Position " + str(folder[0]) + ":", "Could not find tracks.csv")
continue
folders.append(folder)
return folders
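# Both helpers return a list of (position, path) tuples, e.g.
# [(0, PosixPath('out/XY00')), (5, PosixPath('out/XY05'))] (paths
# illustrative); get_tracking_folders only requires features.csv and data.h5,
# while get_tracked_folders additionally requires tracks.csv.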
def tracking_pyama(out_dir: str, pos: list, expand: int = 0) -> None:
"""
    Perform Pyama tracking on the specified positions and save the resulting tracks into the output directory
Parameters:
out_dir (str): Output directory path
pos (list): List of position numbers
expand (int): Expansion factor for labels
Returns:
None
"""
folders = get_tracking_folders(out_dir,pos)
for folder in folders:
track_position_pyama(folder[0],folder[1],expand)
def track_position_pyama(pos: int, pos_path: pathlib.Path, expand: int) -> None:
"""
Perform Pyama tracking on a single position.
    data.h5 contains the segmentation and the background-corrected fluorescence images.
    features.csv contains the particle features (bounding boxes, integrated fluorescence).
    The resulting tracks are saved to tracks.csv.
Parameters:
pos (int): Position number
pos_path (pathlib.Path): Path to position directory
expand (int): Expansion factor for labels
Returns:
None
"""
features_path = pos_path.joinpath('features.csv')
features = pd.read_csv(features_path.absolute(),index_col=0)
data_path = pos_path.joinpath('data.h5')
data = h5py.File(data_path.absolute(), "r")
data_labels = data['labels']
min_track_length = data.attrs['frame_max']-data.attrs['frame_min']+1
tracks = []
frames = features['frame'].unique()
frames.sort()
print("Starting Pyama Tracking for position " + str(pos))
for frame in frames:
print("Frame " + str(frame))
frame_data_index = frame-data.attrs['frame_min']
frame_features = features[features['frame'] == frame]
if len(tracks) == 0:
for index, row in frame_features.iterrows():
tracks.append([row])
else:
matched_labels = []
frame_labels = data_labels[frame_data_index]
prev_labels = data_labels[frame_data_index-1]
# Add optional label expansion here
if expand > 0:
frame_labels = sk.segmentation.expand_labels(frame_labels,expand)
prev_labels = sk.segmentation.expand_labels(prev_labels,expand)
# Add frames left to check if len(track) + frames_left < min_frames
remove_indices = []
for i in range(len(tracks)):
track = tracks[i]
prev_row = track[len(track)-1]
# no memory so ignore any lost
if frame - prev_row['frame'] > 1:
remove_indices.append(i)
# dont add track to completed (we only want entire tracks)
continue
#frame_labels = data_labels[frame_data_index]
#prev_labels = data_labels[frame_data_index-1]
# Add optional label expansion here
#if expand > 0:
#frame_labels = sk.segmentation.expand_labels(frame_labels,expand)
#prev_labels = sk.segmentation.expand_labels(prev_labels,expand)
label_slice = frame_labels[prev_labels == prev_row['label']]
found_labels = sorted(np.unique(label_slice))
if len(found_labels) > 0 and found_labels[0] == 0:
found_labels.pop(0)
if len(found_labels) == 0:
# No match for this track
continue
#print(found_labels)
local_matches = []
for label in found_labels:
row = frame_features[frame_features['label'] == label].iloc[0]
# already found parent
if row['label'] in matched_labels:
continue
local_matches.append({'s': row['area'], 'r': row})
if len(local_matches) > 0:
local_matches = sorted(local_matches, key=lambda r: r['s'], reverse=True)
selected_match = local_matches[0]
track.append(selected_match['r'])
matched_labels.append(selected_match['r']['label'])
# Remove tracks that can be ignored
if len(remove_indices) > 0:
remove_indices.reverse()
for index in remove_indices:
tracks.pop(index)
unmatched_rows = frame_features[~np.isin(frame_features['label'],matched_labels)]
for index, row in unmatched_rows.iterrows():
tracks.append([row])
data.close()
result_data = []
particle_id = 0
for track in tracks:
if len(track) < min_track_length:
continue
for row in track:
row['particle'] = particle_id
row['enabled'] = True
result_data.append(row)
particle_id += 1
tracks = pd.DataFrame(result_data)
# Find large particles and disable
large_particles = tracks[tracks['area'] > 10000]['particle'].unique()
tracks.loc[np.isin(tracks['particle'], large_particles), 'enabled'] = False
tracks_path = pos_path.joinpath('tracks.csv')
tracks.to_csv(tracks_path.absolute())
print("Done")
def position_path(out_dir: str, pos: int) -> pathlib.Path:
"""
Get the path for a specific position
Parameters:
out_dir (str): Output directory path
pos (int): Position number
Returns:
pathlib.Path: Path to the position directory
"""
for path in pathlib.Path(out_dir).iterdir():
if not path.is_dir():
continue
if not re.search('XY0*' + str(pos) + '$', path.name):
continue
return path
return None
def pyama_segmentation(img: np.ndarray) -> np.ndarray:
"""
Perform Pyama segmentation on an image
Parameters:
img (np.ndarray): Input image
Returns:
np.ndarray: Labeled segmentation of the image
"""
binary_segmentation = binarize_frame(img)
# remove small objects MIN_SIZE=1000
sk.morphology.remove_small_objects(binary_segmentation,min_size=1000,out=binary_segmentation)
# convert binary mask to labels (1,2,3,...)
return sk.measure.label(binary_segmentation, connectivity=1)
def segment_positions(nd2_path: str, out_dir: str, pos: list, seg_channel: int, fl_channels: list, frame_min: int = None, frame_max: int = None, bg_corr: bool = True) -> None:
"""
Segment positions from an ND2 file
Parameters:
nd2_path (str): Path to ND2 file
out_dir (str): Output directory path
pos (list): List of position numbers
seg_channel (int): Segmentation channel index
fl_channels (list): List of fluorescence channel indices
frame_min (int): Minimum frame number
frame_max (int): Maximum frame number
bg_corr (bool): Whether to perform background correction
Returns:
None
"""
if not pathlib.Path(nd2_path).is_file():
print("Invalid ND2 Path")
return
fl_channels = list(set(fl_channels))
pos = list(set(pos)) # remove duplicates
nd2 = ND2Reader(nd2_path)
if seg_channel < 0 or seg_channel > len(nd2.metadata['channels']) - 1:
print("Invalid Segmentation Channel")
return
for c in fl_channels:
if c < 0 or c > len(nd2.metadata['channels']) - 1:
print("Invalid Fluorescence Channel")
return
positions = list(nd2.metadata['fields_of_view'])
if len(pos) > 0:
positions = [p for p in positions if p in pos]
if len(positions) == 0:
print("Invalid Positions")
return
fl_channel_names = [nd2.metadata['channels'][c] for c in fl_channels]
try:
# Check and calculate padding
max_field = max(nd2.metadata['fields_of_view'])
        if max_field > 0:
            # len(str(...)) avoids the off-by-one of ceil(log10(n)) at exact powers of ten
            padding = len(str(max_field))
else:
# Save metadata to a text file
with open("metadata_output.txt", "w") as file:
file.write(str(nd2.metadata))
print("Warning: fields_of_view contains zero or negative values.")
padding = 0 # or any default you prefer
except KeyError:
print("Error: 'fields_of_view' key not found in metadata.")
padding = 0 # or any default you prefer
frames = list(nd2.metadata['frames'])
if frame_min is not None:
if frame_min not in frames:
print('Invalid frame_min')
return
else:
frame_min = frames[0]
if frame_max is not None:
if frame_max not in frames:
print('Invalid frame_max')
return
else:
frame_max = frames[-1]
if frame_max < frame_min:
print('frame_max must be greater or equal to frame_min')
return
frames = [f for f in frames if frame_min <= f <= frame_max]
width, height, num_frames = nd2.metadata['width'], nd2.metadata['height'], len(frames)
print('Segmentation Channel: ' + nd2.metadata['channels'][seg_channel])
print('Fluorescence Channels: ' + ', '.join(fl_channel_names))
for pos in positions:
print(f"Segmenting position {pos}")
pos_dir = pathlib.Path(out_dir).joinpath(f'XY{str(pos).zfill(padding)}')
pos_dir.mkdir(parents=True, exist_ok=True)
file_path = pos_dir.joinpath('data.h5')
feature_keys = ['x', 'y'] + [f'brightness_{i}' for i in range(len(fl_channels))] + ['area', 'frame', 'label', 'bbox_x1', 'bbox_x2', 'bbox_y1', 'bbox_y2']
feature_data = {key: [] for key in feature_keys}
with h5py.File(file_path.absolute(), "w") as file_handle:
data_labels = file_handle.create_dataset('labels', (num_frames, height, width), dtype=np.uint16, chunks=(1, height, width))
data_fl = file_handle.create_dataset('fluorescence', (num_frames, len(fl_channels), height, width), dtype=np.float64, chunks=(1, 1, height, width))
file_handle.attrs['frame_min'] = frame_min
file_handle.attrs['frame_max'] = frame_max
file_handle.attrs['seg_channel'] = seg_channel
file_handle.attrs['fl_channels'] = fl_channels
file_handle.attrs['fl_channel_names'] = fl_channel_names
file_handle.attrs['width'] = nd2.metadata['width']
file_handle.attrs['height'] = nd2.metadata['height']
file_handle.attrs['pixel_microns'] = nd2.metadata['pixel_microns']
for index, frame in enumerate(frames):
frame_image = nd2.get_frame_2D(t=frame, c=seg_channel, v=pos)
binary_segmentation = binarize_frame(frame_image)
sk.morphology.remove_small_objects(binary_segmentation, min_size=1000, out=binary_segmentation)
label_segmentation = sk.measure.label(binary_segmentation, connectivity=1)
frame_fl_images = []
for c in fl_channels:
frame_fl_image = nd2.get_frame_2D(t=frame, c=c, v=pos)
if bg_corr:
frame_fl_image = background_correction(frame_fl_image, label_segmentation, 5, 5, 0.5)
frame_fl_images.append(frame_fl_image)
props = sk.measure.regionprops(label_segmentation)
print(f"Frame {frame}: {len(props)} features")
for prop in props:
if prop.bbox[0] == 0 or prop.bbox[1] == 0 or prop.bbox[2] == height or prop.bbox[3] == width:
label_segmentation[label_segmentation == prop.label] = 0
continue
x, y = prop.centroid
feature_data['x'].append(x)
feature_data['y'].append(y)
for i, fl_image in enumerate(frame_fl_images):
feature_data[f'brightness_{i}'].append(fl_image[tuple(prop.coords.T)].sum())
feature_data['area'].append(prop.area)
feature_data['frame'].append(frame)
feature_data['label'].append(prop.label)
feature_data['bbox_x1'].append(prop.bbox[0])
feature_data['bbox_y1'].append(prop.bbox[1])
feature_data['bbox_x2'].append(prop.bbox[2] - 1)
feature_data['bbox_y2'].append(prop.bbox[3] - 1)
data_labels[index, :, :] = label_segmentation
for i, fl_image in enumerate(frame_fl_images):
data_fl[index, i, :, :] = fl_image
features_path = pos_dir.joinpath('features.csv')
features = pd.DataFrame(feature_data)
features.to_csv(features_path.absolute())
print("Done")
def background_spline(image, img_mask, countX, countY, overlap):
"""
Creates a background model using a grid of sampling points and spline interpolation.
Used for background correction of microscopy images by modeling systematic
illumination variations. Part of the pipeline for processing fluorescence data.
Parameters:
image (np.ndarray): Input microscopy image
img_mask (np.ndarray): Binary mask of regions to exclude (e.g. cells)
countX (int): Number of grid points in X direction
countY (int): Number of grid points in Y direction
overlap (float): Overlap between grid windows (0-1)
Returns:
np.ndarray: Interpolated background map same size as input image
"""
# Get image dimensions
h,w = image.shape
# Calculate size of sampling windows based on grid density and overlap
sizeX = int(w/((countX - (countX-1)*overlap)*2))
sizeY = int(h/((countY - (countY-1)*overlap)*2))
# Create grid points for sampling background
pointsX = np.linspace(sizeX,w-(sizeX),countX).astype(int)
pointsY = np.linspace(sizeY,h-(sizeY),countY).astype(int)
# Create masked array to ignore foreground objects
masked_img = np.ma.masked_array(image, mask=img_mask)
# Sample background at each grid point
pos = []
vals = []
for ix in range(len(pointsX)):
for iy in range(len(pointsY)):
x = pointsX[ix]
y = pointsY[iy]
# Get sampling window boundaries
x1,x2 = max(0,x-sizeX),min(w-1,x+sizeX)
y1,y2 = max(0,y-sizeY),min(h-1,y+sizeY)
# Extract window and calculate statistics
sub_image = masked_img[y1:y2,x1:x2]
vals.append([np.ma.mean(sub_image),np.ma.median(sub_image),np.ma.var(sub_image)])
pos.append([x,y,ix,iy])
# Convert to numpy arrays
vals = np.array(vals)
pos = np.array(pos)
# Create support points for spline interpolation using median values
fit_support = np.empty((countX, countY))
for i in range(len(pos)):
fit_support[pos[i,2],pos[i,3]] = vals[i,1]
# Interpolate background using bicubic spline
bg_spline = scipy.interpolate.RectBivariateSpline(x=pointsX, y=pointsY, z=fit_support)
return bg_spline(x=range(w), y=range(h)).T
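# Illustrative window geometry (assumed numbers): for a 1000 px wide image
# with countX=5 and overlap=0.5, sizeX = 1000 / ((5 - 4*0.5) * 2) ≈ 166, so
# each sampling window spans ~333 px and neighbouring windows share roughly
# half their area.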
def background_correction(image,img_mask,countX,countY,overlap = 0.1):
    """
    Subtract an interpolated background model from a fluorescence image.
    Builds a spline background with background_spline, subtracts it from the
    image, and normalizes by the column-wise median of the background's ratio
    to its mean.
    Parameters:
        image (np.ndarray): Input microscopy image
        img_mask (np.ndarray): Binary mask of foreground regions to exclude (e.g. cells)
        countX (int): Number of grid points in X direction
        countY (int): Number of grid points in Y direction
        overlap (float): Overlap between grid windows (0-1)
    Returns:
        np.ndarray: Background-corrected image
    """
h,w = image.shape
patch = background_spline(image,img_mask,countX,countY,overlap)
bg_mean = patch.mean()
bg_interp = patch.copy()
A = np.divide(bg_interp, bg_mean)
bg_interp = np.subtract(image, bg_interp)
bg_interp = np.divide(bg_interp, np.median(A, axis=0, keepdims=True))
return bg_interp
def moonraedler_dir():
    """
    Return the AG Moonraedler project directory, preferring the local mount
    over the DFS share. Returns None if neither is available.
    """
p = pathlib.Path('/project/ag-moonraedler')
if p.is_dir():
return p.absolute()
p = pathlib.Path('//z-sv-dfsroot.ad.physik.uni-muenchen.de/dfsextern/project/ag-moonraedler')
if p.is_dir():
return p.absolute()
|
import cv2
import numpy as np
import tifffile
from io import BytesIO
import base64
from PIL import Image
def read_tiff(file_path, channel=0):
"""
Reads a TIFF file and returns the data as a numpy array.
"""
tif = tifffile.TiffFile(file_path)
tif_data = tif.asarray()[channel]
return tif_data
def map_uint16_to_uint8(img, lower_bound=None, upper_bound=None):
'''
    Map a 16-bit image through a lookup table to convert it to 8-bit.
Parameters
----------
img: numpy.ndarray[np.uint16]
image that should be mapped
lower_bound: int, optional
lower bound of the range that should be mapped to ``[0, 255]``,
value must be in the range ``[0, 65535]`` and smaller than `upper_bound`
(defaults to ``numpy.min(img)``)
upper_bound: int, optional
upper bound of the range that should be mapped to ``[0, 255]``,
value must be in the range ``[0, 65535]`` and larger than `lower_bound`
(defaults to ``numpy.max(img)``)
Returns
-------
numpy.ndarray[uint8]
'''
    # check for None first; comparing None with an int would raise a TypeError
    if lower_bound is not None and not (0 <= lower_bound < 2**16):
        raise ValueError(
            '"lower_bound" must be in the range [0, 65535]')
    if upper_bound is not None and not (0 <= upper_bound < 2**16):
        raise ValueError(
            '"upper_bound" must be in the range [0, 65535]')
if lower_bound is None:
lower_bound = np.min(img)
if upper_bound is None:
upper_bound = np.max(img)
if lower_bound >= upper_bound:
raise ValueError(
'"lower_bound" must be smaller than "upper_bound"')
lut = np.concatenate([
np.zeros(lower_bound, dtype=np.uint16),
np.linspace(0, 255, upper_bound - lower_bound).astype(np.uint16),
np.ones(2**16 - upper_bound, dtype=np.uint16) * 255
])
return lut[img].astype(np.uint8)
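# Minimal usage sketch (synthetic frame; the percentile bounds are an
# assumption for illustration):
def _demo_map_uint16_to_uint8() -> np.ndarray:
    rng = np.random.default_rng(0)
    img16 = rng.integers(0, 2**16, size=(8, 8)).astype(np.uint16)
    lo = int(np.percentile(img16, 1))
    hi = int(np.percentile(img16, 99))
    return map_uint16_to_uint8(img16, lower_bound=lo, upper_bound=hi)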
def get_channel_image(tiff_data, channel):
"""
Returns the image for the specified channel as a base64 encoded string.
"""
# Get the channel data
channel_data = tiff_data[:, :, channel]
# Normalize the data to 0-255 range
channel_data = (channel_data - np.min(channel_data)) / (np.max(channel_data) - np.min(channel_data)) * 255
# Convert the data to uint8 type
channel_data = channel_data.astype(np.uint8)
    # Convert the data to a base64 encoded string
    # (tifffile.imwrite writes to disk and returns None; numpy_to_b64_string
    # below produces the base64 string the docstring promises)
    return numpy_to_b64_string(channel_data)
def numpy_to_b64_string(image):
rawBytes = BytesIO()
im = Image.fromarray(image)
im.save(rawBytes, format="JPEG")
rawBytes.seek(0)
image = base64.b64encode(rawBytes.getvalue())
img_str = image.decode('utf-8')
return img_str
def extract_overlay(image_path, vmin_bf_channel=0, vmax_bf_channel=40000, vmin_overlay_red_channel=4, vmax_overlay_red_channel=400, path=True):
if path is True:
tiff = tifffile.TiffFile(image_path)
else:
tiff = image_path
red = cv2.cvtColor(map_uint16_to_uint8(tiff.asarray()[1], lower_bound=vmin_overlay_red_channel, upper_bound=vmax_overlay_red_channel), cv2.COLOR_GRAY2BGR)
red[:,:,2]=0
red[:,:,1]=0
gray = cv2.cvtColor(map_uint16_to_uint8(tiff.asarray()[0], lower_bound=vmin_bf_channel, upper_bound=vmax_bf_channel), cv2.COLOR_GRAY2BGR)
result = cv2.add((red), (gray))
return result
|
from flask import Flask, render_template, request, redirect, url_for, jsonify
from src.pyama_web.backend.gui import CellViewer
import src.pyama_web.core.pyama_util as pyama_util
import os
class App:
def __init__(self):
self.app = Flask(__name__)
self.cell_viewer = None
def routes(self):
@self.app.route('/test')
def test():
return 'test'
@self.app.route('/')
def index():
return render_template('index.html')
@self.app.route('/select_paths', methods=['POST'])
def select_paths():
data = request.json
nd2_path = data['nd2_path']
out_path = data['out_path']
redirect_to = data['redirect_to']
if not nd2_path or not out_path:
return jsonify({'error': 'Both ND2 path and output path must be selected'}), 400
init_type = 'view' if redirect_to == 'view' else 'analysis'
self.cell_viewer = CellViewer(nd2_path=nd2_path, output_path=out_path, init_type=init_type)
self.cell_viewer.nd2_path = nd2_path
self.cell_viewer.output_path = out_path
if redirect_to == 'view':
return jsonify({'redirect': url_for('view')})
elif redirect_to == 'analysis':
return jsonify({'redirect': url_for('analysis')})
else:
return jsonify({'redirect': url_for('index')})
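        # Example request (illustrative; paths are placeholders):
        #   curl -X POST http://localhost:8000/select_paths \
        #        -H 'Content-Type: application/json' \
        #        -d '{"nd2_path": "/data/exp.nd2", "out_path": "/data/out",
        #             "redirect_to": "analysis"}'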
@self.app.route('/view', methods=['GET', 'POST'])
def view():
if self.cell_viewer is None:
return redirect(url_for('index'))
self.cell_viewer.position_changed()
current_particle_index = self.cell_viewer.particle_index()
return render_template('view.html',
channel_image=self.cell_viewer.return_image(),
n_positions=len(self.cell_viewer.positions),
n_channels=self.cell_viewer.channel_max,
n_frames=self.cell_viewer.frame_max,
all_particles_len=self.cell_viewer.all_particles_len,
current_particle_index=current_particle_index,
brightness_plot=self.cell_viewer.brightness_plot,
disabled_particles=self.cell_viewer.disabled_particles)
@self.app.route('/preprocess', methods=['GET', 'POST'])
def processing():
return render_template('preprocess.html')
@self.app.route('/documentation', methods=['GET', 'POST'])
def documentation():
svg = "static/images/UserTutorial.svg"
return render_template('documentation.html', svg=svg)
@self.app.route('/update_image', methods=['GET', 'POST'])
def update_image():
new_position = int(request.form['position'])
new_channel = int(request.form['channel'])
new_frame = int(request.form['frame'])
new_particle = int(request.form['particle'])
if new_position != self.cell_viewer.position:
self.cell_viewer.position = self.cell_viewer.position_options[new_position]
self.cell_viewer.position_changed()
if new_particle != self.cell_viewer.particle:
self.cell_viewer.particle = self.cell_viewer.all_particles[new_particle]
self.cell_viewer.particle_changed()
self.cell_viewer.channel = new_channel
self.cell_viewer.frame = new_frame
self.cell_viewer.get_channel_image()
self.cell_viewer.draw_outlines()
return jsonify({
'channel_image': self.cell_viewer.return_image(),
'brightness_plot': self.cell_viewer.brightness_plot,
'all_particles_len': self.cell_viewer.all_particles_len,
'particle_enabled': self.cell_viewer.particle_enabled,
'current_particle': self.cell_viewer.particle,
'disabled_particles': self.cell_viewer.disabled_particles
})
@self.app.route('/update_particle_enabled', methods=['POST'])
def update_particle_enabled():
data = request.json
enabled = data['enabled']
if self.cell_viewer:
self.cell_viewer.particle_enabled = enabled
self.cell_viewer.particle_enabled_changed()
return jsonify({
'channel_image': self.cell_viewer.return_image(),
'brightness_plot': self.cell_viewer.brightness_plot,
'all_particles_len': self.cell_viewer.all_particles_len,
'disabled_particles': self.cell_viewer.disabled_particles
})
return jsonify({'error': 'Cell viewer not initialized'}), 400
@self.app.route('/do_segmentation', methods=['POST'])
def do_segmentation():
data = request.json
nd2_path = self.cell_viewer.nd2_path
out_dir = self.cell_viewer.output_path
positions = list(range(data['position_min'], data['position_max'] + 1))
frame_min = data['frame_min']
frame_max = data['frame_max']
segmentation_channel = []
fluorescence_channels = []
for i in range(self.cell_viewer.channel_max + 1):
channel_type = data[f'channel_{i}']
if channel_type == 'Brightfield':
segmentation_channel.append(i)
elif channel_type == 'Fluorescent':
fluorescence_channels.append(i)
segmentation_channel = segmentation_channel[0] if len(segmentation_channel) == 1 else segmentation_channel
pyama_util.segment_positions(nd2_path, out_dir, positions, segmentation_channel, fluorescence_channels, frame_min=frame_min, frame_max=frame_max)
return jsonify({'status': 'success'})
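        # Example payload (illustrative): positions 0-3, frames 0-10, first
        # channel used for segmentation, second for fluorescence:
        #   {"position_min": 0, "position_max": 3, "frame_min": 0, "frame_max": 10,
        #    "channel_0": "Brightfield", "channel_1": "Fluorescent"}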
@self.app.route('/do_tracking', methods=['POST'])
def do_tracking():
data = request.json
out_dir = self.cell_viewer.output_path
positions = list(range(data['position_min'], data['position_max'] + 1))
expand_labels = data['expand_labels']
pyama_util.tracking_pyama(out_dir, positions, expand=expand_labels)
return jsonify({'status': 'success'})
@self.app.route('/do_square_rois', methods=['POST'])
def do_square_rois():
data = request.json
out_dir = self.cell_viewer.output_path
positions = list(range(data['position_min'], data['position_max'] + 1))
square_um_size = data['square_size']
pyama_util.square_roi(out_dir, positions, square_um_size)
return jsonify({'status': 'success'})
@self.app.route('/do_export', methods=['POST'])
def do_export():
data = request.json
out_dir = self.cell_viewer.output_path
positions = list(range(data['position_min'], data['position_max'] + 1))
minutes = data['minutes']
try:
pyama_util.csv_output(out_dir, positions, minutes)
return jsonify({'status': 'success'})
except Exception as e:
return jsonify({'status': 'error', 'message': str(e)}), 400
@self.app.route('/analysis')
def analysis():
if self.cell_viewer is None:
return redirect(url_for('index'))
n_positions = len(self.cell_viewer.nd2.metadata['fields_of_view'])+1
return render_template('analysis.html',
n_positions=n_positions,
n_channels=self.cell_viewer.channel_max,
n_frames=self.cell_viewer.frame_max)
@self.app.route('/list_directory', methods=['GET'])
def list_directory():
path = request.args.get('path', '/')
try:
items = os.listdir(path)
return jsonify({
'path': path,
'items': [{'name': item, 'isDirectory': os.path.isdir(os.path.join(path, item))} for item in items]
})
except Exception as e:
return jsonify({'error': str(e)}), 400
@self.app.route('/select_folder', methods=['POST'])
def select_folder():
path = request.json['path']
# Here you can add logic to handle the selected folder
return jsonify({'message': f'Folder selected: {path}'})
def load_paths(self, file_path):
with open(file_path, mode='r') as file:
paths = [line.strip() for line in file]
return paths
def run(self):
self.app.run(host='0.0.0.0', port=8000, debug=True)
app_instance = App()
app_instance.routes()
flask_app = app_instance.app
if __name__ == '__main__':
app_instance.run()
|
import os
import re
import cv2
import h5py
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from nd2reader import ND2Reader
import plotly.io as pio
from time import sleep
from io import BytesIO
import base64
from PIL import Image
import pathlib
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def are_all_enabled(group):
"""
Check if all values in the group are equal to 1.
Returns True if all values are 1, False otherwise.
"""
return (group == 1).all()
def numpy_to_b64_string(image):
rawBytes = BytesIO()
im = Image.fromarray(image)
im.save(rawBytes, format="JPEG")
rawBytes.seek(0)
image = base64.b64encode(rawBytes.getvalue())
img_str = image.decode('utf-8')
return img_str
class CellViewer:
def __init__(self, nd2_path, output_path, init_type='view'):
        # kept as a plain string; downstream code joins it with os.path
        self.output_path = output_path
self.nd2 = ND2Reader(nd2_path)
self.file = None
self.COLOR_GRAY = '#808080'
self.COLOR_RED = 'Red'
self.COLOR_ORANGE = '#FF8C00'
self.OPACITY_SELECTED = 1
self.OPACITY_DEFAULT = 0.5
self.frame_change_suppress = False
self.particle = None
self.position = None
self.key_down = {}
self.disabled_particles = []
# Only run position-related initialization if init_type is 'view'
if init_type == 'view':
# parse valid positions
self.get_positions()
#print(self.positions)
self.frame_min = 0
self.frame_max = self.nd2.metadata['num_frames']-1
self.frame = self.frame_min
self.channel = 0
self.channel_min = 0
self.channel_max = len(self.nd2.metadata['channels'])-1
#self.max_pixel_value = np.iinfo(np.uint16).max
self.max_pixel_value = 10000
self.image_size = 400
self.outline_kernel = np.array([[0,0,1,0,0],[0,1,1,1,0],[1,1,0,1,1],[0,1,1,1,0],[0,0,1,0,0]])
# Replacing widgets from the show() method:
self.brightness_figure = go.Figure()
self.brightness_figure.update_layout(title='Brightness', height=1200)
self.brightness_lines = go.Scatter(x=[], y=[], mode='lines')
self.brightness_cursor_line = go.Scatter(x=[0,0], y=[0,1], mode='lines', line=dict(color=self.COLOR_RED))
self.area_figure = go.Figure()
self.area_figure.update_layout(title='Area')
self.area_lines = go.Scatter(x=[], y=[], mode='lines')
self.area_cursor_line = go.Scatter(x=[0,0], y=[0,1], mode='lines', line=dict(color=self.COLOR_RED))
controls_widgets = []
if init_type == 'view':
self.position_options = []
for pos in self.positions:
self.position_options.append((str(pos[0]), pos))
# Example: if self.positions is [(0, 'XY00'), (1, 'XY01')]
# then position_options = [('0', (0, 'XY00')), ('1', (1, 'XY01'))]
# position_options is a list of tuples, where the first element is a string and the second element is a tuple
self.position = self.position_options[0]
# Replacing widgets.Dropdown, widgets.IntSlider, and widgets.Checkbox with dictionaries
self.position_dropdown = {'type': 'Dropdown', 'description': 'Position:', 'options': self.position_options}
self.max_value_slider = {'type': 'IntSlider', 'min': 0, 'max': np.iinfo(np.uint16).max, 'description': 'Max Pixel Value (Contrast)', 'value': self.max_pixel_value}
self.frame_slider = {'type': 'IntSlider', 'description': 'Frame', 'value': self.frame}
self.channel_slider = {'type': 'IntSlider', 'min': self.channel_min, 'max': self.channel_max, 'description': 'Channel', 'value': self.channel}
self.particle_dropdown = {'type': 'Dropdown'}
self.enabled_checkbox = {'type': 'Checkbox', 'description': 'Cell Enabled', 'value': False}
self.area_figure.update_layout(height=300)
self.brightness_plot = self.plotly_to_json(self.brightness_figure)
def plotly_to_json(self, fig):
return pio.to_json(fig)
def get_positions(self):
# Will only get positions that have the necessary files (data.h5, features.csv, tracks.csv)
self.positions = []
folders = self.get_subdirs(self.output_path)
for folder in folders:
match = re.search(r'^XY0*(\d+)$', folder)
if not match:
continue
pos_files = self.get_files(os.path.join(self.output_path,folder))
self.pos_files = pos_files
if not 'data.h5' in pos_files:
continue
if not 'features.csv' in pos_files:
continue
if not 'tracks.csv' in pos_files:
continue
#print(pos_files)
# Create tuple with position number and folder name
pos = (int(match.group(1)), folder)
self.positions.append(pos)
self.positions = sorted(self.positions, key=lambda p: p[0], reverse=False)
def get_subdirs(self, directory):
return [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory,d))]
def get_files(self, directory):
return [d for d in os.listdir(directory) if os.path.isfile(os.path.join(directory,d))]
def get_track_data(self, particle, field):
t = self.all_tracks[self.all_tracks['particle'] == particle]
return t['frame'].values, t[field].values
def update_plots(self):
# sleep(0.150)
particle_index = self.particle_index()
def is_enabled(value):
enabled_values = {1, '1', '1.0', 1.0, True, 'True'}
value_str = str(value).lower()
enabled_values_str = {str(v).lower() for v in enabled_values}
return value_str in enabled_values_str
particle_states = []
ix=0
for particle in self.all_particles:
# Get just the first frame's enabled value for this particle
particle_data = self.all_tracks[self.all_tracks['particle'] == particle]
enabled_value = particle_data['enabled'].iloc[0] # Get first value
particle_states.append(1 if is_enabled(enabled_value) else 0)
if particle_index == particle:
if particle_states[-1] == 1:
self.particle_enabled = True
else:
self.particle_enabled = False
if particle_states[-1] == 0:
self.disabled_particles.append(float(ix))
ix+=1
# Initialize empty lists for area data
area_x = []
area_y = []
# Add enabled areas except selected particle
for i in range(self.all_particles_len):
if particle_states[i] == 1 and i != particle_index:
try:
area_x.append(self.area_x[i])
area_y.append(self.area_y[i])
except IndexError:
print("IndexError at particle", i)
# print("area_x shape and length:", (area_x[0]).shape, len(area_x))
# Add the selected particle area
area_x.append(self.area_x[particle_index])
area_y.append(self.area_y[particle_index])
# Initialize empty lists for brightness data
brightness_x = []
brightness_y = []
# Add enabled brightness values except selected particle
for i in range(self.all_particles_len):
# if particle_states[i] == 1 and i != particle_index:
try:
brightness_x.append(self.brightness_x[i])
brightness_y.append(self.brightness_y[i])
except IndexError:
print("IndexError at particle", i," (brightness)")
# Add the selected particle brightness
# brightness_x.append(self.brightness_x[particle_index])
# brightness_y.append(self.brightness_y[particle_index])
        # particle_states doubles as the opacity list: 1 for enabled, 0 for disabled
        opacities = list(particle_states)
colors = [self.COLOR_GRAY] * len(brightness_x)
if self.particle_enabled == True:
colors[particle_index] = self.COLOR_RED
opacities[particle_index] = self.OPACITY_SELECTED
else:
colors[particle_index] = self.COLOR_ORANGE
opacities[particle_index] = self.OPACITY_SELECTED
# Update brightness tracks
self.brightness_figure.data = []
for i in range(len(brightness_x)):
self.brightness_figure.add_trace(go.Scatter(x=brightness_x[i], y=brightness_y[i], mode='lines',
line=dict(color=colors[i]), opacity=opacities[i],
name=f'Trace {i}'))
self.brightness_figure.add_trace(go.Scatter(x=brightness_x[particle_index], y=brightness_y[particle_index], mode='lines',
line=dict(color=colors[particle_index]), opacity=opacities[particle_index],
name=f'Trace {particle_index} (Highlighted)'))
# self.brightness_figure.add_trace(self.brightness_cursor_line)
# Update area tracks
self.area_figure.data = []
for i in range(len(area_x)):
self.area_figure.add_trace(go.Scatter(x=area_x[i], y=area_y[i], mode='lines',
line=dict(color=colors[i]), opacity=opacities[i]))
self.area_figure.add_trace(self.area_cursor_line)
self.brightness_plot = self.plotly_to_json(self.brightness_figure)
def position_changed(self):
self.data_dir = os.path.join(self.output_path,self.position[1][1])
if self.file is not None:
self.file.close()
self.file = h5py.File(os.path.join(self.data_dir,'data.h5'), "r")
self.frame_min = self.file.attrs['frame_min']
self.frame_max = self.file.attrs['frame_max']
self.frame = self.frame_min
self.brightness_figure = go.Figure()
self.brightness_figure.update_layout(title='Brightness')
self.brightness_lines = go.Scatter(x=[], y=[], mode='lines')
self.brightness_cursor_line = go.Scatter(x=[0,0], y=[0,1], mode='lines', line=dict(color=self.COLOR_RED))
self.area_figure = go.Figure()
self.area_figure.update_layout(title='Area')
self.area_lines = go.Scatter(x=[], y=[], mode='lines')
self.area_cursor_line = go.Scatter(x=[0,0], y=[0,1], mode='lines', line=dict(color=self.COLOR_RED))
self.brightness_figure.update_layout(title=self.file.attrs['fl_channel_names'][0])
# set Brightnesses names for plots file_handle.attrs['fl_channel_names']
self.all_tracks = pd.read_csv(os.path.join(self.data_dir,'tracks.csv'))
self.all_particles = list(self.all_tracks['particle'].unique())
self.all_particles_len = len(self.all_particles)
self.brightness_x = []
self.brightness_y = []
for p in self.all_particles:
x,y = self.get_track_data(p, 'brightness_0')
self.brightness_x.append(x)
self.brightness_y.append(y)
# print("brightness_x length:", (len(self.brightness_x)))
self.area_x = []
self.area_y = []
for p in self.all_particles:
x,y = self.get_track_data(p, 'area')
self.area_x.append(x)
self.area_y.append(y)
# print("area_x length:", (len(self.area_x)))
colors = [self.COLOR_GRAY] * len(self.all_particles)
colors[len(self.all_particles)-1] = self.COLOR_RED
opacities = [self.OPACITY_DEFAULT] * len(self.all_particles)
opacities[len(self.all_particles)-1] = self.OPACITY_SELECTED
self.update_cursors()
# self.brightness_figure.add_trace(self.brightness_lines)
# self.brightness_figure.add_trace(self.brightness_cursor_line)
self.area_figure.add_trace(self.area_lines)
self.area_figure.add_trace(self.area_cursor_line)
self.particle = None
dropdown_options = []
for particle in self.all_particles:
dropdown_options.append((str(particle), particle))
self.particle_dropdown['options'] = dropdown_options
self.particle_dropdown['description'] = 'Cell (' + str(len(self.all_particles)) + '): '
# Stop slider from updating on every change & edit slider values
# self.frame_change_suppress = True
# if self.frame_min > self.frame_slider.max:
# self.frame_slider.max = self.frame_max
# self.frame_slider.min = self.frame_min
# else:
# self.frame_slider.min = self.frame_min
# self.frame_slider.max = self.frame_max
# self.frame_slider.value = self.frame
# self.frame_change_suppress = False
# Will be called if position actually changed (not initial)
if self.particle is None:
self.particle = self.particle_dropdown['options'][0][1]
self.particle_changed()
#self.update_cursors()
else:
self.frame_changed()
self.brightness_plot = self.plotly_to_json(self.brightness_figure)
# enable / disable current particle and save tracks to file
def particle_enabled_changed(self):
if self.particle_enabled == True:
index_csv = 1
else:
index_csv = 0
# self.all_tracks.loc[self.all_tracks['particle'] == self.particle, 'enabled'] = self.particle_enabled
self.all_tracks.loc[self.all_tracks['particle'] == self.particle, 'enabled'] = index_csv
self.all_tracks.to_csv(self.data_dir + '/tracks.csv')
self.update_plots()
self.draw_outlines()
self.update_image()
def particle_index(self):
# print(f'Index current particle {self.all_particles.index(self.particle)}')
return self.all_particles.index(self.particle)
def particle_changed(self):
def is_enabled(value):
enabled_values = {1, '1', '1.0', 1.0, True, 'True'}
value_str = str(value).lower()
enabled_values_str = {str(v).lower() for v in enabled_values}
return value_str in enabled_values_str
enabled_value = self.all_tracks[self.all_tracks['particle'] == self.particle]['enabled'].iloc[0]
enabled = is_enabled(enabled_value)
# enabled = len(self.all_tracks[(self.all_tracks['particle'] == self.particle) & ((self.all_tracks['enabled'] == True))]) > 0
# set both so no update to file is applied
self.particle_enabled = enabled
self.enabled_checkbox['value'] = enabled
self.update_plots()
self.particle_tracks = self.all_tracks[self.all_tracks['particle'] == self.particle]
# Get new Position for image
self.x = int(self.particle_tracks['x'].values.mean()) - self.image_size
self.y = int(self.particle_tracks['y'].values.mean()) - self.image_size
self.x = max(0,min(self.nd2.metadata['height'] - 2*self.image_size, self.x))
self.y = max(0,min(self.nd2.metadata['width'] - 2*self.image_size, self.y))
self.get_channel_image()
self.draw_outlines()
self.update_image()
def particle_dropdown_changed(self, change):
if change['new'] is not self.particle:
self.particle = change['new']
self.particle_changed()
def position_dropdown_changed(self, change):
if change['new'] is not self.position:
self.position = change['new']
self.position_changed()
def frame_slider_changed(self, change):
if self.frame_change_suppress:
return
if change['new'] is not self.frame:
self.frame = change['new']
self.frame_changed()
def max_pixel_slider_changed(self, change):
if change['new'] is not self.frame:
self.max_pixel_value = change['new']
self.max_pixel_value_changed()
def update_cursors(self):
# Move Brightness Cursor
self.brightness_cursor_line.x = [self.frame, self.frame]
self.brightness_cursor_line.y = [0, 1]
# Move Area Cursor
self.area_cursor_line.x = [self.frame, self.frame]
self.area_cursor_line.y = [0, 1]
def frame_changed(self):
self.update_cursors()
self.get_channel_image()
self.draw_outlines()
self.update_image()
def channel_changed(self):
self.get_channel_image()
self.update_image()
def max_pixel_value_changed(self):
self.get_channel_image()
self.update_image()
def channel_slider_changed(self, change):
if change['new'] is not self.channel:
self.channel = change['new']
self.channel_changed()
def enabled_checkbox_changed(self, change):
if change['new'] is not self.particle_enabled:
self.particle_enabled = change['new']
self.particle_enabled_changed()
def adjust_image(self, image, max_value):
img = image.copy().astype(np.float64)
img[img >= max_value] = np.iinfo(np.uint16).max
img[img < max_value] /= max_value
img[img < max_value] *= np.iinfo(np.uint16).max
return img
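    # Worked example for adjust_image (numbers illustrative): with
    # max_value=10000, a raw pixel of 5000 becomes 5000/10000 * 65535 ≈ 32768
    # and anything >= 10000 saturates at 65535; get_channel_image below then
    # compresses this 16-bit range down to 8 bits for display.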
def get_channel_image(self):
img = self.nd2.get_frame_2D(v=int(self.position[0]),c=self.channel,t=self.frame)[self.x:self.x+2*self.image_size,self.y:self.y+2*self.image_size]
# There seems to be an issue with the arguments. Apparently v should be the position, but it's not working.
# Instead, v seems to be the input for the frame.
# img = self.nd2.get_frame_2D(v=0,c=self.channel,t=self.frame)[self.x:self.x+2*self.image_size,self.y:self.y+2*self.image_size]
pixel_val = self.max_pixel_value
if self.channel == 0:
pixel_val = 40000
adjusted = self.adjust_image(img,pixel_val)
self.channel_image = cv2.cvtColor(cv2.convertScaleAbs(adjusted, alpha=1./256., beta=-.49999),cv2.COLOR_GRAY2RGB)
def update_image(self):
img = self.combine_images(self.outline_image,self.channel_image,self.outline_mask)
_, img_enc = cv2.imencode('.jpg', img)
# self.image.value = img_enc.tobytes()
def return_image(self):
img = self.combine_images(self.outline_image,self.channel_image,self.outline_mask)
return numpy_to_b64_string(img)
# _, img_enc = cv2.imencode('.jpg', img)
# self.image.value = img_enc.tobytes()
def get_outline(self, img):
f64_img = img.astype(np.float64)
filter_img = cv2.filter2D(src=f64_img, ddepth=-1,kernel=self.outline_kernel) / self.outline_kernel.sum()
filter_img[filter_img == f64_img] = 0
mask = (f64_img != filter_img) & (filter_img > 0)
filter_img[mask] = img[mask]
return filter_img.astype(img.dtype)
def combine_images(self,a,b,m):
mask = cv2.cvtColor(m,cv2.COLOR_GRAY2RGB)
inv_mask = cv2.bitwise_not(mask)
ma = cv2.bitwise_and(a,mask)
mb = cv2.bitwise_and(b,inv_mask)
return cv2.add(ma,mb)
def get_particle_label(self):
tracks = self.all_tracks[(self.all_tracks['frame'] == self.frame) & (self.all_tracks['particle'] == self.particle)]
if len(tracks) == 0:
return None
return int(tracks.iloc[0]['label'])
def draw_outlines(self):
if self.frame < self.frame_min or self.frame > self.frame_max:
self.outline_mask = np.zeros((self.image_size*2,self.image_size*2),dtype=np.uint8)
self.outline_image = np.zeros((self.image_size*2,self.image_size*2,3),dtype=np.uint8)
return
all_labels = self.file['labels'][self.frame-self.frame_min] [self.x:self.x+2*self.image_size,self.y:self.y+2*self.image_size]
outlines = self.get_outline(all_labels)
image_shape = (self.channel_image.shape[0],self.channel_image.shape[1],3)
overlay = np.zeros(image_shape,dtype=np.uint8)
o = np.zeros(image_shape,dtype=np.uint8)
frame_tracks = self.all_tracks[self.all_tracks['frame'] == self.frame]
true_values = [1, '1', '1.0', 1.0, True, 'True', 'true', 'TRUE']
# Convert true_values to lowercase strings
true_values_lower = [str(v).lower() for v in true_values]
        disabled_condition = ~frame_tracks['enabled'].astype(str).str.lower().isin(true_values_lower)
        # Get unique labels for enabled tracks
        enabled_labels = frame_tracks[~disabled_condition]['label'].unique()
# enabled_labels = frame_tracks[frame_tracks['enabled'] == True]['label'].unique()
tracked_labels = frame_tracks['label'].unique()
# all tracked cells
o = cv2.rectangle(o, (0,0), (image_shape[0],image_shape[1]), (255,0,0), -1) # Red
m1 = np.isin(outlines, tracked_labels).astype(np.uint8)*255
overlay = self.combine_images(o,overlay,m1)
# enabled cells
o = cv2.rectangle(o, (0,0), (image_shape[0],image_shape[1]), (0,255,0), -1) # Green
m2 = np.isin(outlines, enabled_labels).astype(np.uint8)*255
overlay = self.combine_images(o,overlay,m2)
# Selected cell
label = self.get_particle_label()
if label is not None:
if self.particle_enabled == True:
o = cv2.rectangle(o, (0,0), (image_shape[0],image_shape[1]), (0,0,255), -1) # Dark Blue
else:
o = cv2.rectangle(o, (0,0), (image_shape[0],image_shape[1]), (0,140,255), -1)
m3 = (outlines == label).astype(np.uint8)*255
overlay = self.combine_images(o,overlay,m3)
self.outline_image = overlay #self.combine_images(overlay,self.image_data,m1)
self.outline_mask = m1
def handle_keydown(self, event):
if event['key'] in self.key_down and self.key_down[event['key']] == True:
return
self.key_down[event['key']] = True
ctrl = event['ctrlKey']
if event['key'] == 'ArrowLeft':
if ctrl:
self.frame_slider['value'] = max(self.frame_min, self.frame - 10)
else:
self.frame_slider['value'] = max(self.frame_min, self.frame - 1)
elif event['key'] == 'ArrowRight':
if ctrl:
self.frame_slider['value'] = min(self.frame_max, self.frame + 10)
else:
self.frame_slider['value'] = min(self.frame_max, self.frame + 1)
elif event['key'] == 'c':
channel = self.channel_slider['value'] + 1
if channel > self.channel_max:
channel = self.channel_min
self.channel_slider['value'] = channel
elif event['key'] == 'ArrowUp':
index = self.particle_index()
if index < len(self.all_particles) - 1:
self.particle_dropdown['value'] = self.all_particles[index+1]
elif event['key'] == 'ArrowDown':
index = self.particle_index()
if index > 0:
self.particle_dropdown['value'] = self.all_particles[index-1]
elif event['key'] == 'Enter' and ctrl:
self.enabled_checkbox['value'] = not self.enabled_checkbox['value']
def handle_keyup(self, event):
self.key_down[event['key']] = False
|
# %%
import src.pyama_web.core.pyama_util as pyama_util
import src.pyama_web.backend.gui as gui
AG_MOON = str(pyama_util.moonraedler_dir())
# %%
# Segment position(s)
# Path to ND2 File
nd2_path = AG_MOON + '/Judith/Students/Simon_Master/230505_TF73_HuH7.nd2'
# Output directory (will create a new folder per position in here)
out_dir = AG_MOON + '/SPrins/Pyama_Test/Multi_FL_Test'
# Positions to evaluate (zero based, so position 1 is 0, position 2 is 1, etc.)
# Comma separated inside square brackets e.g. [1,2,3]
# Empty brackets for all positions e.g. []
positions = [70,71]
# Starting frame zero based
# Set to None to ignore
frame_min = None
# End frame (zero based)
# Set to None to ignore
frame_max = None
# Channel to use for segmentation (zero based)
segmentation_channel = 0
# Channel(s) to use for fluorescence tracks (zero based)
# Comma separated inside square brackets
fluorescence_channels = [1,2]
pyama_util.segment_positions(nd2_path,out_dir,positions, segmentation_channel, fluorescence_channels,frame_min=frame_min,frame_max=frame_max)
# %%
# CellViewer GUI
# Keybinds:
# c: cycle through channels
# arrow keys (left/right): previous/next frame
# ctrl + arrow keys (left/right): same as above but in steps of 10 frames
# arrow keys (down/up): previous/next cell
# ctrl + enter: toggle selected cell (enabled/disabled)
nd2_dir = AG_MOON + '/Judith/Students/Simon_Master/230505_TF73_HuH7.nd2'
out_dir = AG_MOON + '/SPrins/Pyama_Test/Multi_FL_Test'
gui.CellViewer(nd2_dir, out_dir).show()
# %%
# Track position(s)
# Output directory (same as for segmentation)
out_dir = AG_MOON + '/SPrins/Pyama_Test/Multi_FL_Test'
# Positions to evaluate (zero based, so position 1 is 0, position 2 is 1, etc.)
# Folder names inside the output directory are already zero based, so XY001 corresponds to position 1, i.e. the second position in the file
# Comma separated inside square brackets e.g. [1,2,3]
# Empty brackets for all positions e.g. [] (will look for all segmented folders in the output directory and perform tracking on them)
positions = [70,71]
# Expand labels during tracking (can help if cells move a lot, so that overlap between frames is not guaranteed)
# Grows the labels for tracking by the given number of pixels in each direction
expand_labels = 0
pyama_util.tracking_pyama(out_dir,positions,expand=expand_labels)
# %%
# Perform square ROIs "segmentation" for position(s)
# Output directory (same as for segmentation)
out_dir = AG_MOON + '/SPrins/Pyama_Test/Multi_FL_Test'
# Positions to evaluate (same as tracking)
positions = [70,71]
# Half side length in um of the square ROI (width = height = 2*square_um_size)
square_um_size = 30
pyama_util.square_roi(out_dir,positions,square_um_size)
# %%
# Convert position output to excel file for position(s) (old pyama output format)
# Output directory (same as for segmentation)
out_dir = AG_MOON + '/SPrins/Pyama_Test/Multi_FL_Test'
# Positions to evaluate (same as tracking)
positions = [70,71]
# Minutes between consecutive frames (used for the time column in the output)
minutes_per_frame = 5
pyama_util.csv_output(out_dir,positions,minutes_per_frame)
# %%
|
"""Welcome to Reflex! This file outlines the steps to create a basic app."""
import reflex as rx
from rxconfig import config
class State(rx.State):
"""The app state."""
...
def index() -> rx.Component:
# Welcome Page (Index)
return rx.container(
rx.color_mode.button(position="top-right"),
rx.vstack(
rx.heading("Welcome to Reflex!", size="9"),
rx.text(
"Get started by editing ",
rx.code(f"{config.app_name}/{config.app_name}.py"),
size="5",
),
rx.link(
rx.button("Check out our docs!"),
href="https://reflex.dev/docs/getting-started/introduction/",
is_external=True,
),
spacing="5",
justify="center",
min_height="85vh",
),
rx.logo(),
)
app = rx.App()
app.add_page(index)
|
import reflex as rx
config = rx.Config(app_name="app")
|
import reflex as rx
from app.state import ProjectileState
from app.components.input_form import input_form
from app.components.trajectory_plot import (
trajectory_plot_component,
)
from rxconfig import config
def index() -> rx.Component:
return rx.el.div(
rx.el.div(
rx.el.h1(
"Projectile Trajectory Calculator",
class_name="text-4xl font-extrabold text-center my-8 text-transparent bg-clip-text bg-gradient-to-r from-indigo-600 to-purple-600",
),
rx.el.div(
rx.el.div(
input_form(),
class_name="w-full md:w-1/3 lg:w-1/4 p-4",
),
rx.el.div(
rx.cond(
ProjectileState.trajectory_data.length()
> 1,
trajectory_plot_component(),
rx.el.div(
rx.el.p(
"Enter parameters and click 'Calculate Trajectory' to visualize the path.",
class_name="text-gray-600 text-center p-10 text-lg",
),
class_name="flex items-center justify-center h-[450px] bg-white rounded-lg shadow-md",
),
),
class_name="w-full md:w-2/3 lg:w-3/4 p-4",
),
class_name="flex flex-col md:flex-row",
),
class_name="container mx-auto p-4",
),
on_mount=ProjectileState.calculate_default_trajectory,
class_name="min-h-screen bg-gradient-to-br from-gray-100 to-slate-200",
)
app = rx.App(theme=rx.theme(appearance="light"))
app.add_page(index)
|
import reflex as rx
import numpy as np
from typing import TypedDict, List as TypingList
class TrajectoryPoint(TypedDict):
x: float
y: float
class ProjectileState(rx.State):
initial_velocity: float = 20.0
launch_angle_deg: float = 45.0
initial_height: float = 0.0
gravity: float = 9.81
time_step: float = 0.05
trajectory_data: TypingList[TrajectoryPoint] = []
max_height: float = 0.0
total_range: float = 0.0
time_of_flight: float = 0.0
error_message: str = ""
@rx.var
def max_height_str(self) -> str:
return f"{self.max_height:.2f}"
@rx.var
def total_range_str(self) -> str:
return f"{self.total_range:.2f}"
@rx.var
def time_of_flight_str(self) -> str:
return f"{self.time_of_flight:.2f}"
@rx.event
def handle_form_submit(self, form_data: dict):
self.error_message = ""
try:
self.initial_velocity = float(
form_data["initial_velocity"]
)
self.launch_angle_deg = float(
form_data["launch_angle"]
)
initial_height_str = form_data.get(
"initial_height", "0.0"
)
self.initial_height = float(
initial_height_str
if initial_height_str
else "0.0"
)
if self.initial_velocity <= 0:
self.error_message = (
"Initial velocity must be positive."
)
self._reset_outputs()
return
if not 0 <= self.launch_angle_deg <= 90:
self.error_message = "Launch angle must be between 0 and 90 degrees."
self._reset_outputs()
return
if self.initial_height < 0:
self.error_message = (
"Initial height cannot be negative."
)
self._reset_outputs()
return
except ValueError:
self.error_message = "Invalid input. Please enter numeric values."
self._reset_outputs()
return
except KeyError as e:
self.error_message = f"Missing required field: {e}. Please fill all fields."
self._reset_outputs()
return
self._calculate_trajectory()
def _reset_outputs(self):
self.trajectory_data = []
self.max_height = 0.0
self.total_range = 0.0
self.time_of_flight = 0.0
def _calculate_trajectory(self):
self._reset_outputs()
angle_rad = np.deg2rad(self.launch_angle_deg)
v0x = self.initial_velocity * np.cos(angle_rad)
v0y = self.initial_velocity * np.sin(angle_rad)
if self.initial_height == 0 and (
self.launch_angle_deg == 0
or (v0y <= 0 and v0x == 0)
):
self.trajectory_data = [
TrajectoryPoint(x=0, y=0)
]
self.max_height = 0.0
self.total_range = 0.0
self.time_of_flight = 0.0
return
t = 0.0
x = 0.0
y_current = self.initial_height
current_max_height = self.initial_height
self.trajectory_data.append(
TrajectoryPoint(x=x, y=y_current)
)
abs_v0y = abs(v0y)
if self.gravity > 0:
time_to_peak_if_positive_v0y = (
abs_v0y / self.gravity if v0y > 0 else 0
)
height_at_peak = (
self.initial_height
+ abs_v0y * time_to_peak_if_positive_v0y
- 0.5
* self.gravity
* time_to_peak_if_positive_v0y**2
if v0y > 0
else self.initial_height
)
time_from_peak_to_ground = (
np.sqrt(2 * height_at_peak / self.gravity)
if height_at_peak >= 0
else 0
)
max_sim_time = (
time_to_peak_if_positive_v0y
+ time_from_peak_to_ground
) * 1.5 + 5 * self.time_step
if max_sim_time <= self.time_step:
max_sim_time = 100 * self.time_step
else:
max_sim_time = (
1000 * self.time_step
if v0y <= 0
else (
2 * self.initial_height / abs(v0y)
if abs(v0y) > 0
else 1000 * self.time_step
)
)
while True:
t += self.time_step
x = v0x * t
y_new = (
self.initial_height
+ v0y * t
- 0.5 * self.gravity * t**2
)
current_max_height = max(
current_max_height, y_new
)
if y_new < 0:
y_prev = self.trajectory_data[-1]["y"]
t_prev = t - self.time_step
if y_prev > 0:
t_fraction = y_prev / (y_prev - y_new)
t_impact = (
t_prev + t_fraction * self.time_step
)
x_impact = v0x * t_impact
self.trajectory_data.append(
TrajectoryPoint(x=x_impact, y=0.0)
)
self.time_of_flight = t_impact
self.total_range = x_impact
else:
self.trajectory_data.append(
TrajectoryPoint(
x=self.trajectory_data[-1]["x"],
y=0.0,
)
)
self.time_of_flight = t_prev
self.total_range = self.trajectory_data[
-1
]["x"]
break
self.trajectory_data.append(
TrajectoryPoint(x=x, y=y_new)
)
if t > max_sim_time:
self.error_message = "Simulation time exceeded safety limit. Trajectory may be incomplete."
self.time_of_flight = t
self.total_range = x
break
self.max_height = current_max_height
if len(self.trajectory_data) < 2:
self.trajectory_data.append(
TrajectoryPoint(
x=self.total_range + 0.01, y=0.0
)
)
@rx.event
def calculate_default_trajectory(self):
self._calculate_trajectory()
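
if __name__ == "__main__":
    # Hedged sanity-check sketch (not used by the app): the simulated flight
    # time and range can be compared against the closed-form solution for a
    # launch from height h:
    #   t_flight = (v0*sin(a) + sqrt((v0*sin(a))**2 + 2*g*h)) / g
    #   range    = v0*cos(a) * t_flight
    import math

    v0, angle_deg, h, g = 20.0, 45.0, 0.0, 9.81
    a = math.radians(angle_deg)
    t_flight = (v0 * math.sin(a) + math.sqrt((v0 * math.sin(a)) ** 2 + 2 * g * h)) / g
    print(f"analytic: t = {t_flight:.2f} s, range = {v0 * math.cos(a) * t_flight:.2f} m")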
|
import reflex as rx
from app.state import ProjectileState
def input_form() -> rx.Component:
return rx.el.form(
rx.el.div(
rx.el.label(
"Initial Velocity (m/s):",
class_name="block text-sm font-medium text-gray-700",
),
rx.el.input(
name="initial_velocity",
type="number",
default_value=ProjectileState.initial_velocity.to_string(),
placeholder="e.g., 20",
step="0.1",
required=True,
class_name="mt-1 block w-full px-3 py-2 bg-white border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-indigo-500 focus:border-indigo-500 sm:text-sm",
),
class_name="mb-4",
),
rx.el.div(
rx.el.label(
"Launch Angle (degrees):",
class_name="block text-sm font-medium text-gray-700",
),
rx.el.input(
name="launch_angle",
type="number",
default_value=ProjectileState.launch_angle_deg.to_string(),
placeholder="e.g., 45",
step="0.1",
min="0",
max="90",
required=True,
class_name="mt-1 block w-full px-3 py-2 bg-white border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-indigo-500 focus:border-indigo-500 sm:text-sm",
),
class_name="mb-4",
),
rx.el.div(
rx.el.label(
"Initial Height (m):",
class_name="block text-sm font-medium text-gray-700",
),
rx.el.input(
name="initial_height",
type="number",
default_value=ProjectileState.initial_height.to_string(),
placeholder="e.g., 0",
step="0.1",
min="0",
required=True,
class_name="mt-1 block w-full px-3 py-2 bg-white border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-indigo-500 focus:border-indigo-500 sm:text-sm",
),
class_name="mb-4",
),
rx.el.button(
"Calculate Trajectory",
type="submit",
class_name="w-full bg-indigo-600 hover:bg-indigo-700 text-white font-semibold py-2 px-4 rounded-md shadow-sm focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-indigo-500",
),
on_submit=ProjectileState.handle_form_submit,
reset_on_submit=True,
class_name="p-6 bg-gray-100 rounded-lg shadow-md",
)
|
import reflex as rx
from app.state import ProjectileState
def trajectory_plot_component() -> rx.Component:
return rx.el.div(
rx.recharts.scatter_chart(
rx.recharts.cartesian_grid(
stroke_dasharray="3 3", stroke="#cccccc"
),
rx.recharts.x_axis(
rx.recharts.label(
value="Distance (m)",
position="insideBottom",
dy=10,
fill="#374151",
),
type="number",
data_key="x",
domain=["auto", "auto"],
allow_data_overflow=True,
stroke="#4b5563",
),
rx.recharts.y_axis(
rx.recharts.label(
value="Height (m)",
angle=-90,
position="insideLeft",
dx=-5,
fill="#374151",
),
type="number",
data_key="y",
domain=[0, "auto"],
allow_data_overflow=True,
stroke="#4b5563",
),
rx.recharts.scatter(
data_key="y",
name="Trajectory",
fill="#4f46e5",
line=True,
shape="circle",
),
rx.recharts.tooltip(
cursor={"strokeDasharray": "3 3"}
),
rx.recharts.legend(
wrapper_style={"paddingTop": "10px"}
),
data=ProjectileState.trajectory_data,
height=450,
margin={
"left": 20,
"right": 20,
"top": 25,
"bottom": 20,
},
class_name="bg-white p-4 rounded-lg shadow-md w-full",
),
rx.el.div(
rx.el.h3(
"Trajectory Metrics",
class_name="text-xl font-semibold mt-6 mb-3 text-gray-800",
),
rx.el.div(
rx.el.p(
f"Max Height: {ProjectileState.max_height_str} m",
class_name="text-md text-gray-700 py-1",
),
rx.el.p(
f"Total Range: {ProjectileState.total_range_str} m",
class_name="text-md text-gray-700 py-1",
),
rx.el.p(
f"Time of Flight: {ProjectileState.time_of_flight_str} s",
class_name="text-md text-gray-700 py-1",
),
class_name="mt-4 p-4 bg-gray-100 rounded-lg shadow-sm",
),
class_name="w-full",
),
rx.cond(
ProjectileState.error_message != "",
rx.el.div(
ProjectileState.error_message,
class_name="mt-4 p-3 bg-red-100 text-red-700 border border-red-300 rounded-md shadow-sm",
),
rx.fragment(),
),
class_name="w-full",
)
|
import reflex as rx
config = rx.Config(
app_name="app1",
)
|
import abc
import builtins
import collections
import collections.abc
import contextlib
import enum
import functools
import inspect
import keyword
import operator
import sys
import types as _types
import typing
import warnings
__all__ = [
# Super-special typing primitives.
'Any',
'ClassVar',
'Concatenate',
'Final',
'LiteralString',
'ParamSpec',
'ParamSpecArgs',
'ParamSpecKwargs',
'Self',
'Type',
'TypeVar',
'TypeVarTuple',
'Unpack',
# ABCs (from collections.abc).
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'AsyncGenerator',
'AsyncContextManager',
'Buffer',
'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'NamedTuple',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# One-off things.
'Annotated',
'assert_never',
'assert_type',
'clear_overloads',
'dataclass_transform',
'deprecated',
'Doc',
'evaluate_forward_ref',
'get_overloads',
'final',
'Format',
'get_annotations',
'get_args',
'get_origin',
'get_original_bases',
'get_protocol_members',
'get_type_hints',
'IntVar',
'is_protocol',
'is_typeddict',
'Literal',
'NewType',
'overload',
'override',
'Protocol',
'reveal_type',
'runtime',
'runtime_checkable',
'Text',
'TypeAlias',
'TypeAliasType',
'TypeForm',
'TypeGuard',
'TypeIs',
'TYPE_CHECKING',
'Never',
'NoReturn',
'ReadOnly',
'Required',
'NotRequired',
'NoDefault',
'NoExtraItems',
# Pure aliases, have always been in typing
'AbstractSet',
'AnyStr',
'BinaryIO',
'Callable',
'Collection',
'Container',
'Dict',
'ForwardRef',
'FrozenSet',
'Generator',
'Generic',
'Hashable',
'IO',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'List',
'Mapping',
'MappingView',
'Match',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Optional',
'Pattern',
'Reversible',
'Sequence',
'Set',
'Sized',
'TextIO',
'Tuple',
'Union',
'ValuesView',
'cast',
'no_type_check',
'no_type_check_decorator',
]
# for backward compatibility
PEP_560 = True
GenericMeta = type
_PEP_696_IMPLEMENTED = sys.version_info >= (3, 13, 0, "beta")
# Added with bpo-45166 to 3.10.1+ and some 3.9 versions
_FORWARD_REF_HAS_CLASS = "__forward_is_class__" in typing.ForwardRef.__slots__
# The functions below are modified copies of typing internal helpers.
# They are needed by _ProtocolMeta and they provide support for PEP 646.
class _Sentinel:
def __repr__(self):
return "<sentinel>"
_marker = _Sentinel()
if sys.version_info >= (3, 10):
def _should_collect_from_parameters(t):
return isinstance(
t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
)
elif sys.version_info >= (3, 9):
def _should_collect_from_parameters(t):
return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
else:
def _should_collect_from_parameters(t):
return isinstance(t, typing._GenericAlias) and not t._special
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if sys.version_info >= (3, 11):
from typing import Any
else:
class _AnyMeta(type):
def __instancecheck__(self, obj):
if self is Any:
raise TypeError("typing_extensions.Any cannot be used with isinstance()")
return super().__instancecheck__(obj)
def __repr__(self):
if self is Any:
return "typing_extensions.Any"
return super().__repr__()
class Any(metaclass=_AnyMeta):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
checks.
"""
def __new__(cls, *args, **kwargs):
if cls is Any:
raise TypeError("Any cannot be instantiated")
return super().__new__(cls, *args, **kwargs)
ClassVar = typing.ClassVar
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
Final = typing.Final
if sys.version_info >= (3, 11):
final = typing.final
else:
# @final exists in 3.8+, but we backport it for all versions
# before 3.11 to keep support for the __final__ attribute.
# See https://bugs.python.org/issue46342
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
def IntVar(name):
return typing.TypeVar(name)
# A Literal bug was fixed in 3.11.0, 3.10.1 and 3.9.8
if sys.version_info >= (3, 10, 1):
Literal = typing.Literal
else:
def _flatten_literal_params(parameters):
"""An internal helper for Literal creation: flatten Literals among parameters"""
params = []
for p in parameters:
if isinstance(p, _LiteralGenericAlias):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def _value_and_type_iter(params):
for p in params:
yield p, type(p)
class _LiteralGenericAlias(typing._GenericAlias, _root=True):
def __eq__(self, other):
if not isinstance(other, _LiteralGenericAlias):
return NotImplemented
these_args_deduped = set(_value_and_type_iter(self.__args__))
other_args_deduped = set(_value_and_type_iter(other.__args__))
return these_args_deduped == other_args_deduped
def __hash__(self):
return hash(frozenset(_value_and_type_iter(self.__args__)))
class _LiteralForm(_ExtensionsSpecialForm, _root=True):
def __init__(self, doc: str):
self._name = 'Literal'
self._doc = self.__doc__ = doc
def __getitem__(self, parameters):
if not isinstance(parameters, tuple):
parameters = (parameters,)
parameters = _flatten_literal_params(parameters)
val_type_pairs = list(_value_and_type_iter(parameters))
try:
deduped_pairs = set(val_type_pairs)
except TypeError:
# unhashable parameters
pass
else:
# similar logic to typing._deduplicate on Python 3.9+
if len(deduped_pairs) < len(val_type_pairs):
new_parameters = []
for pair in val_type_pairs:
if pair in deduped_pairs:
new_parameters.append(pair[0])
deduped_pairs.remove(pair)
assert not deduped_pairs, deduped_pairs
parameters = tuple(new_parameters)
return _LiteralGenericAlias(self, parameters)
Literal = _LiteralForm(doc="""\
A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
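    # e.g. Literal[1, "1", 1] deduplicates parameters by (value, type) pairs:
    # the repeated int 1 collapses to a single entry while the string "1"
    # stays distinct from it.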
_overload_dummy = typing._overload_dummy
if hasattr(typing, "get_overloads"): # 3.11+
overload = typing.overload
get_overloads = typing.get_overloads
clear_overloads = typing.clear_overloads
else:
# {module: {qualname: {firstlineno: func}}}
_overload_registry = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
The overloads for a function can be retrieved at runtime using the
get_overloads() function.
"""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
try:
_overload_registry[f.__module__][f.__qualname__][
f.__code__.co_firstlineno
] = func
except AttributeError:
# Not a normal function; ignore.
pass
return _overload_dummy
def get_overloads(func):
"""Return all defined overloads for *func* as a sequence."""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
if f.__module__ not in _overload_registry:
return []
mod_dict = _overload_registry[f.__module__]
if f.__qualname__ not in mod_dict:
return []
return list(mod_dict[f.__qualname__].values())
def clear_overloads():
"""Clear all overloads in the registry."""
_overload_registry.clear()
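
if __name__ == "__main__":
    # Hedged demo (illustration only, not part of typing_extensions): stub
    # definitions decorated with @overload land in the registry above, keyed
    # by module, qualified name and first line number, and can be recovered
    # with get_overloads(); the undecorated implementation is not registered.
    @overload
    def _demo(x: int) -> int: ...
    @overload
    def _demo(x: str) -> str: ...
    def _demo(x):
        return x
    assert len(get_overloads(_demo)) == 2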
# This is not a real generic class. Don't use outside annotations.
Type = typing.Type
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
Deque = typing.Deque
DefaultDict = typing.DefaultDict
OrderedDict = typing.OrderedDict
Counter = typing.Counter
ChainMap = typing.ChainMap
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
if sys.version_info >= (3, 13, 0, "beta"):
from typing import AsyncContextManager, AsyncGenerator, ContextManager, Generator
else:
def _is_dunder(attr):
return attr.startswith('__') and attr.endswith('__')
# Python <3.9 doesn't have typing._SpecialGenericAlias
_special_generic_alias_base = getattr(
typing, "_SpecialGenericAlias", typing._GenericAlias
)
class _SpecialGenericAlias(_special_generic_alias_base, _root=True):
def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()):
if _special_generic_alias_base is typing._GenericAlias:
# Python <3.9
self.__origin__ = origin
self._nparams = nparams
super().__init__(origin, nparams, special=True, inst=inst, name=name)
else:
# Python >= 3.9
super().__init__(origin, nparams, inst=inst, name=name)
self._defaults = defaults
def __setattr__(self, attr, val):
allowed_attrs = {'_name', '_inst', '_nparams', '_defaults'}
if _special_generic_alias_base is typing._GenericAlias:
# Python <3.9
allowed_attrs.add("__origin__")
if _is_dunder(attr) or attr in allowed_attrs:
object.__setattr__(self, attr, val)
else:
setattr(self.__origin__, attr, val)
@typing._tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(typing._type_check(p, msg) for p in params)
if (
self._defaults
and len(params) < self._nparams
and len(params) + len(self._defaults) >= self._nparams
):
params = (*params, *self._defaults[len(params) - self._nparams:])
actual_len = len(params)
if actual_len != self._nparams:
if self._defaults:
expected = f"at least {self._nparams - len(self._defaults)}"
else:
expected = str(self._nparams)
if not self._nparams:
raise TypeError(f"{self} is not a generic class")
raise TypeError(
f"Too {'many' if actual_len > self._nparams else 'few'}"
f" arguments for {self};"
f" actual {actual_len}, expected {expected}"
)
return self.copy_with(params)
_NoneType = type(None)
Generator = _SpecialGenericAlias(
collections.abc.Generator, 3, defaults=(_NoneType, _NoneType)
)
AsyncGenerator = _SpecialGenericAlias(
collections.abc.AsyncGenerator, 2, defaults=(_NoneType,)
)
ContextManager = _SpecialGenericAlias(
contextlib.AbstractContextManager,
2,
name="ContextManager",
defaults=(typing.Optional[bool],)
)
AsyncContextManager = _SpecialGenericAlias(
contextlib.AbstractAsyncContextManager,
2,
name="AsyncContextManager",
defaults=(typing.Optional[bool],)
)
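    # With the defaults above, e.g. Generator[int] fills in the missing send
    # and return parameters and is equivalent to Generator[int, None, None].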
_PROTO_ALLOWLIST = {
'collections.abc': [
'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'Buffer',
],
'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
'typing_extensions': ['Buffer'],
}
_EXCLUDED_ATTRS = frozenset(typing.EXCLUDED_ATTRIBUTES) | {
"__match_args__", "__protocol_attrs__", "__non_callable_proto_members__",
"__final__",
}
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in {'Protocol', 'Generic'}:
continue
annotations = getattr(base, '__annotations__', {})
for attr in (*base.__dict__, *annotations):
if (not attr.startswith('_abc_') and attr not in _EXCLUDED_ATTRS):
attrs.add(attr)
return attrs
def _caller(depth=2):
try:
return sys._getframe(depth).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError): # For platforms without _getframe()
return None
# `__match_args__` attribute was removed from protocol members in 3.13,
# we want to backport this change to older Python versions.
if sys.version_info >= (3, 13):
Protocol = typing.Protocol
else:
def _allow_reckless_class_checks(depth=3):
"""Allow instance and class checks for special stdlib modules.
The abc and functools modules indiscriminately call isinstance() and
issubclass() on the whole MRO of a user class, which may contain protocols.
"""
return _caller(depth) in {'abc', 'functools', None}
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
def _type_check_issubclass_arg_1(arg):
"""Raise TypeError if `arg` is not an instance of `type`
in `issubclass(arg, <protocol>)`.
In most cases, this is verified by type.__subclasscheck__.
Checking it again unnecessarily would slow down issubclass() checks,
so, we don't perform this check unless we absolutely have to.
For various error paths, however,
we want to ensure that *this* error message is shown to the user
where relevant, rather than a typing.py-specific error message.
"""
if not isinstance(arg, type):
# Same error message as for issubclass(1, int).
raise TypeError('issubclass() arg 1 must be a class')
# Inheriting from typing._ProtocolMeta isn't actually desirable,
# but is necessary to allow typing.Protocol and typing_extensions.Protocol
# to mix without getting TypeErrors about "metaclass conflict"
class _ProtocolMeta(type(typing.Protocol)):
# This metaclass is somewhat unfortunate,
# but is necessary for several reasons...
#
# NOTE: DO NOT call super() in any methods in this class
# That would call the methods on typing._ProtocolMeta on Python 3.8-3.11
# and those are slow
def __new__(mcls, name, bases, namespace, **kwargs):
if name == "Protocol" and len(bases) < 2:
pass
elif {Protocol, typing.Protocol} & set(bases):
for base in bases:
if not (
base in {object, typing.Generic, Protocol, typing.Protocol}
or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, [])
or is_protocol(base)
):
raise TypeError(
f"Protocols can only inherit from other protocols, "
f"got {base!r}"
)
return abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)
def __init__(cls, *args, **kwargs):
abc.ABCMeta.__init__(cls, *args, **kwargs)
if getattr(cls, "_is_protocol", False):
cls.__protocol_attrs__ = _get_protocol_attrs(cls)
def __subclasscheck__(cls, other):
if cls is Protocol:
return type.__subclasscheck__(cls, other)
if (
getattr(cls, '_is_protocol', False)
and not _allow_reckless_class_checks()
):
if not getattr(cls, '_is_runtime_protocol', False):
_type_check_issubclass_arg_1(other)
raise TypeError(
"Instance and class checks can only be used with "
"@runtime_checkable protocols"
)
if (
# this attribute is set by @runtime_checkable:
cls.__non_callable_proto_members__
and cls.__dict__.get("__subclasshook__") is _proto_hook
):
_type_check_issubclass_arg_1(other)
non_method_attrs = sorted(cls.__non_callable_proto_members__)
raise TypeError(
"Protocols with non-method members don't support issubclass()."
f" Non-method members: {str(non_method_attrs)[1:-1]}."
)
return abc.ABCMeta.__subclasscheck__(cls, other)
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if cls is Protocol:
return type.__instancecheck__(cls, instance)
if not getattr(cls, "_is_protocol", False):
# i.e., it's a concrete subclass of a protocol
return abc.ABCMeta.__instancecheck__(cls, instance)
if (
not getattr(cls, '_is_runtime_protocol', False) and
not _allow_reckless_class_checks()
):
raise TypeError("Instance and class checks can only be used with"
" @runtime_checkable protocols")
if abc.ABCMeta.__instancecheck__(cls, instance):
return True
for attr in cls.__protocol_attrs__:
try:
val = inspect.getattr_static(instance, attr)
except AttributeError:
break
# this attribute is set by @runtime_checkable:
if val is None and attr not in cls.__non_callable_proto_members__:
break
else:
return True
return False
def __eq__(cls, other):
# Hack so that typing.Generic.__class_getitem__
# treats typing_extensions.Protocol
# as equivalent to typing.Protocol
if abc.ABCMeta.__eq__(cls, other) is True:
return True
return cls is Protocol and other is typing.Protocol
# This has to be defined, or the abc-module cache
# complains about classes with this metaclass being unhashable,
# if we define only __eq__!
def __hash__(cls) -> int:
return type.__hash__(cls)
@classmethod
def _proto_hook(cls, other):
if not cls.__dict__.get('_is_protocol', False):
return NotImplemented
for attr in cls.__protocol_attrs__:
for base in other.__mro__:
            # Check if the member appears in the class dictionary...
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
# ...or in annotations, if it is a sub-protocol.
annotations = getattr(base, '__annotations__', {})
if (
isinstance(annotations, collections.abc.Mapping)
and attr in annotations
and is_protocol(other)
):
break
else:
return NotImplemented
return True
class Protocol(typing.Generic, metaclass=_ProtocolMeta):
__doc__ = typing.Protocol.__doc__
__slots__ = ()
_is_protocol = True
_is_runtime_protocol = False
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', False):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# Prohibit instantiation for protocol classes
if cls._is_protocol and cls.__init__ is Protocol.__init__:
cls.__init__ = _no_init
if sys.version_info >= (3, 13):
runtime_checkable = typing.runtime_checkable
else:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol.
        Such a protocol can be used with isinstance() and issubclass().
        Raise TypeError if applied to a non-protocol class.
        This allows a simple-minded structural check very similar to
        one-trick ponies in collections.abc such as Iterable.
For example::
@runtime_checkable
class Closable(Protocol):
def close(self): ...
assert isinstance(open('/some/file'), Closable)
Warning: this will check only the presence of the required methods,
not their type signatures!
"""
if not issubclass(cls, typing.Generic) or not getattr(cls, '_is_protocol', False):
            raise TypeError(f'@runtime_checkable can only be applied to protocol classes,'
                            f' got {cls!r}')
cls._is_runtime_protocol = True
# typing.Protocol classes on <=3.11 break if we execute this block,
# because typing.Protocol classes on <=3.11 don't have a
# `__protocol_attrs__` attribute, and this block relies on the
# `__protocol_attrs__` attribute. Meanwhile, typing.Protocol classes on 3.12.2+
# break if we *don't* execute this block, because *they* assume that all
# protocol classes have a `__non_callable_proto_members__` attribute
# (which this block sets)
if isinstance(cls, _ProtocolMeta) or sys.version_info >= (3, 12, 2):
# PEP 544 prohibits using issubclass()
# with protocols that have non-method members.
# See gh-113320 for why we compute this attribute here,
# rather than in `_ProtocolMeta.__init__`
cls.__non_callable_proto_members__ = set()
for attr in cls.__protocol_attrs__:
try:
is_callable = callable(getattr(cls, attr, None))
except Exception as e:
raise TypeError(
f"Failed to determine whether protocol member {attr!r} "
"is a method member"
) from e
else:
if not is_callable:
cls.__non_callable_proto_members__.add(attr)
return cls
# The "runtime" alias exists for backwards compatibility.
runtime = runtime_checkable
# Our version of runtime-checkable protocols is faster on Python 3.8-3.11
if sys.version_info >= (3, 12):
SupportsInt = typing.SupportsInt
SupportsFloat = typing.SupportsFloat
SupportsComplex = typing.SupportsComplex
SupportsBytes = typing.SupportsBytes
SupportsIndex = typing.SupportsIndex
SupportsAbs = typing.SupportsAbs
SupportsRound = typing.SupportsRound
else:
@runtime_checkable
class SupportsInt(Protocol):
"""An ABC with one abstract method __int__."""
__slots__ = ()
@abc.abstractmethod
def __int__(self) -> int:
pass
@runtime_checkable
class SupportsFloat(Protocol):
"""An ABC with one abstract method __float__."""
__slots__ = ()
@abc.abstractmethod
def __float__(self) -> float:
pass
@runtime_checkable
class SupportsComplex(Protocol):
"""An ABC with one abstract method __complex__."""
__slots__ = ()
@abc.abstractmethod
def __complex__(self) -> complex:
pass
@runtime_checkable
class SupportsBytes(Protocol):
"""An ABC with one abstract method __bytes__."""
__slots__ = ()
@abc.abstractmethod
def __bytes__(self) -> bytes:
pass
@runtime_checkable
class SupportsIndex(Protocol):
__slots__ = ()
@abc.abstractmethod
def __index__(self) -> int:
pass
@runtime_checkable
class SupportsAbs(Protocol[T_co]):
"""
An ABC with one abstract method __abs__ that is covariant in its return type.
"""
__slots__ = ()
@abc.abstractmethod
def __abs__(self) -> T_co:
pass
@runtime_checkable
class SupportsRound(Protocol[T_co]):
"""
An ABC with one abstract method __round__ that is covariant in its return type.
"""
__slots__ = ()
@abc.abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
def _ensure_subclassable(mro_entries):
def inner(func):
if sys.implementation.name == "pypy" and sys.version_info < (3, 9):
cls_dict = {
"__call__": staticmethod(func),
"__mro_entries__": staticmethod(mro_entries)
}
t = type(func.__name__, (), cls_dict)
return functools.update_wrapper(t(), func)
else:
func.__mro_entries__ = mro_entries
return func
return inner
_NEEDS_SINGLETONMETA = (
not hasattr(typing, "NoDefault") or not hasattr(typing, "NoExtraItems")
)
if _NEEDS_SINGLETONMETA:
class SingletonMeta(type):
def __setattr__(cls, attr, value):
# TypeError is consistent with the behavior of NoneType
raise TypeError(
f"cannot set {attr!r} attribute of immutable type {cls.__name__!r}"
)
if hasattr(typing, "NoDefault"):
NoDefault = typing.NoDefault
else:
class NoDefaultType(metaclass=SingletonMeta):
"""The type of the NoDefault singleton."""
__slots__ = ()
def __new__(cls):
return globals().get("NoDefault") or object.__new__(cls)
def __repr__(self):
return "typing_extensions.NoDefault"
def __reduce__(self):
return "NoDefault"
NoDefault = NoDefaultType()
del NoDefaultType
if hasattr(typing, "NoExtraItems"):
NoExtraItems = typing.NoExtraItems
else:
class NoExtraItemsType(metaclass=SingletonMeta):
"""The type of the NoExtraItems singleton."""
__slots__ = ()
def __new__(cls):
return globals().get("NoExtraItems") or object.__new__(cls)
def __repr__(self):
return "typing_extensions.NoExtraItems"
def __reduce__(self):
return "NoExtraItems"
NoExtraItems = NoExtraItemsType()
del NoExtraItemsType
if _NEEDS_SINGLETONMETA:
del SingletonMeta
# Update this to something like >=3.13.0b1 if and when
# PEP 728 is implemented in CPython
_PEP_728_IMPLEMENTED = False
if _PEP_728_IMPLEMENTED:
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
# Aaaand on 3.12 we add __orig_bases__ to TypedDict
# to enable better runtime introspection.
# On 3.13 we deprecate some odd ways of creating TypedDicts.
# Also on 3.13, PEP 705 adds the ReadOnly[] qualifier.
# PEP 728 (still pending) makes more changes.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
# 3.10.0 and later
_TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
def _get_typeddict_qualifiers(annotation_type):
while True:
annotation_origin = get_origin(annotation_type)
if annotation_origin is Annotated:
annotation_args = get_args(annotation_type)
if annotation_args:
annotation_type = annotation_args[0]
else:
break
elif annotation_origin is Required:
yield Required
annotation_type, = get_args(annotation_type)
elif annotation_origin is NotRequired:
yield NotRequired
annotation_type, = get_args(annotation_type)
elif annotation_origin is ReadOnly:
yield ReadOnly
annotation_type, = get_args(annotation_type)
else:
break
class _TypedDictMeta(type):
def __new__(cls, name, bases, ns, *, total=True, closed=None,
extra_items=NoExtraItems):
"""Create new typed dict class object.
This method is called when TypedDict is subclassed,
or when TypedDict is instantiated. This way
TypedDict supports all three syntax forms described in its docstring.
Subclasses and instances of TypedDict return actual dictionaries.
"""
for base in bases:
if type(base) is not _TypedDictMeta and base is not typing.Generic:
raise TypeError('cannot inherit from both a TypedDict type '
'and a non-TypedDict base class')
if closed is not None and extra_items is not NoExtraItems:
raise TypeError(f"Cannot combine closed={closed!r} and extra_items")
if any(issubclass(b, typing.Generic) for b in bases):
generic_base = (typing.Generic,)
else:
generic_base = ()
# typing.py generally doesn't let you inherit from plain Generic, unless
# the name of the class happens to be "Protocol"
tp_dict = type.__new__(_TypedDictMeta, "Protocol", (*generic_base, dict), ns)
tp_dict.__name__ = name
if tp_dict.__qualname__ == "Protocol":
tp_dict.__qualname__ = name
if not hasattr(tp_dict, '__orig_bases__'):
tp_dict.__orig_bases__ = bases
annotations = {}
if "__annotations__" in ns:
own_annotations = ns["__annotations__"]
elif "__annotate__" in ns:
# TODO: Use inspect.VALUE here, and make the annotations lazily evaluated
own_annotations = ns["__annotate__"](1)
else:
own_annotations = {}
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
if _TAKES_MODULE:
own_annotations = {
n: typing._type_check(tp, msg, module=tp_dict.__module__)
for n, tp in own_annotations.items()
}
else:
own_annotations = {
n: typing._type_check(tp, msg)
for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
readonly_keys = set()
mutable_keys = set()
extra_items_type = extra_items
for base in bases:
base_dict = base.__dict__
annotations.update(base_dict.get('__annotations__', {}))
required_keys.update(base_dict.get('__required_keys__', ()))
optional_keys.update(base_dict.get('__optional_keys__', ()))
readonly_keys.update(base_dict.get('__readonly_keys__', ()))
mutable_keys.update(base_dict.get('__mutable_keys__', ()))
# This was specified in an earlier version of PEP 728. Support
# is retained for backwards compatibility, but only for Python
# 3.13 and lower.
if (closed and sys.version_info < (3, 14)
and "__extra_items__" in own_annotations):
annotation_type = own_annotations.pop("__extra_items__")
qualifiers = set(_get_typeddict_qualifiers(annotation_type))
if Required in qualifiers:
raise TypeError(
"Special key __extra_items__ does not support "
"Required"
)
if NotRequired in qualifiers:
raise TypeError(
"Special key __extra_items__ does not support "
"NotRequired"
)
extra_items_type = annotation_type
annotations.update(own_annotations)
for annotation_key, annotation_type in own_annotations.items():
qualifiers = set(_get_typeddict_qualifiers(annotation_type))
if Required in qualifiers:
required_keys.add(annotation_key)
elif NotRequired in qualifiers:
optional_keys.add(annotation_key)
elif total:
required_keys.add(annotation_key)
else:
optional_keys.add(annotation_key)
if ReadOnly in qualifiers:
mutable_keys.discard(annotation_key)
readonly_keys.add(annotation_key)
else:
mutable_keys.add(annotation_key)
readonly_keys.discard(annotation_key)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
tp_dict.__readonly_keys__ = frozenset(readonly_keys)
tp_dict.__mutable_keys__ = frozenset(mutable_keys)
tp_dict.__total__ = total
tp_dict.__closed__ = closed
tp_dict.__extra_items__ = extra_items_type
return tp_dict
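        # e.g. with total=False, annotations {"x": Required[int], "y": int}
        # put "x" in __required_keys__ and "y" in __optional_keys__;
        # ReadOnly[...] only affects __readonly_keys__ / __mutable_keys__.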
__call__ = dict # static method
def __subclasscheck__(cls, other):
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
__instancecheck__ = __subclasscheck__
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
@_ensure_subclassable(lambda bases: (_TypedDict,))
def TypedDict(
typename,
fields=_marker,
/,
*,
total=True,
closed=None,
extra_items=NoExtraItems,
**kwargs
):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type such that a type checker will expect all
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports an additional equivalent form::
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality::
class Point2D(TypedDict, total=False):
x: int
y: int
This means that a Point2D TypedDict can have any of the keys omitted. A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The Required and NotRequired special forms can also be used to mark
individual keys as being required or not required::
class Point2D(TypedDict):
x: int # the "x" key must always be present (Required is the default)
y: NotRequired[int] # the "y" key can be omitted
See PEP 655 for more details on Required and NotRequired.
"""
if fields is _marker or fields is None:
if fields is _marker:
deprecated_thing = "Failing to pass a value for the 'fields' parameter"
else:
deprecated_thing = "Passing `None` as the 'fields' parameter"
example = f"`{typename} = TypedDict({typename!r}, {{}})`"
deprecation_msg = (
f"{deprecated_thing} is deprecated and will be disallowed in "
"Python 3.15. To create a TypedDict class with 0 fields "
"using the functional syntax, pass an empty dictionary, e.g. "
) + example + "."
warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
# Support a field called "closed"
if closed is not False and closed is not True and closed is not None:
kwargs["closed"] = closed
closed = None
# Or "extra_items"
if extra_items is not NoExtraItems:
kwargs["extra_items"] = extra_items
extra_items = NoExtraItems
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
if kwargs:
if sys.version_info >= (3, 13):
raise TypeError("TypedDict takes no keyword arguments")
warnings.warn(
"The kwargs-based syntax for TypedDict definitions is deprecated "
"in Python 3.11, will be removed in Python 3.13, and may not be "
"understood by third-party type checkers.",
DeprecationWarning,
stacklevel=2,
)
ns = {'__annotations__': dict(fields)}
module = _caller()
if module is not None:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = module
td = _TypedDictMeta(typename, (), ns, total=total, closed=closed,
extra_items=extra_items)
td.__orig_bases__ = (TypedDict,)
return td
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
# On 3.8, this would otherwise return True
if hasattr(typing, "TypedDict") and tp is typing.TypedDict:
return False
return isinstance(tp, _TYPEDDICT_TYPES)
if hasattr(typing, "assert_type"):
assert_type = typing.assert_type
else:
def assert_type(val, typ, /):
"""Assert (to the type checker) that the value is of the given type.
When the type checker encounters a call to assert_type(), it
emits an error if the value is not of the specified type::
def greet(name: str) -> None:
assert_type(name, str) # ok
assert_type(name, int) # type checker error
At runtime this returns the first argument unchanged and otherwise
does nothing.
"""
return val
if hasattr(typing, "ReadOnly"): # 3.13+
get_type_hints = typing.get_type_hints
else: # <=3.13
# replaces _strip_annotations()
def _strip_extras(t):
"""Strips Annotated, Required and NotRequired from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly):
return _strip_extras(t.__args__[0])
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if hasattr(_types, "GenericAlias") and isinstance(t, _types.GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return _types.GenericAlias(t.__origin__, stripped_args)
if hasattr(_types, "UnionType") and isinstance(t, _types.UnionType):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
(unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if hasattr(typing, "Annotated"): # 3.9+
hint = typing.get_type_hints(
obj, globalns=globalns, localns=localns, include_extras=True
)
else: # 3.8
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if sys.version_info < (3, 11):
_clean_optional(obj, hint, globalns, localns)
if sys.version_info < (3, 9):
# In 3.8 eval_type does not flatten Optional[ForwardRef] correctly
            # This will recreate and cache Unions.
hint = {
k: (t
if get_origin(t) != Union
else Union[t.__args__])
for k, t in hint.items()
}
if include_extras:
return hint
return {k: _strip_extras(t) for k, t in hint.items()}
_NoneType = type(None)
def _could_be_inserted_optional(t):
"""detects Union[..., None] pattern"""
# 3.8+ compatible checking before _UnionGenericAlias
if get_origin(t) is not Union:
return False
# Assume if last argument is not None they are user defined
if t.__args__[-1] is not _NoneType:
return False
return True
# < 3.11
def _clean_optional(obj, hints, globalns=None, localns=None):
# reverts injected Union[..., None] cases from typing.get_type_hints
# when a None default value is used.
# see https://github.com/python/typing_extensions/issues/310
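        # e.g. for `def f(x: int = None)`, typing.get_type_hints() on <3.11
        # yields Optional[int]; this restores the annotation as written (int).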
if not hints or isinstance(obj, type):
return
defaults = typing._get_defaults(obj) # avoid accessing __annotations___
if not defaults:
return
original_hints = obj.__annotations__
for name, value in hints.items():
            # Not a Union[..., None] or replacement conditions not fulfilled
if (not _could_be_inserted_optional(value)
or name not in defaults
or defaults[name] is not None
):
continue
original_value = original_hints[name]
# value=NoneType should have caused a skip above but check for safety
if original_value is None:
original_value = _NoneType
# Forward reference
if isinstance(original_value, str):
if globalns is None:
if isinstance(obj, _types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
if sys.version_info < (3, 9):
original_value = ForwardRef(original_value)
else:
original_value = ForwardRef(
original_value,
is_argument=not isinstance(obj, _types.ModuleType)
)
original_evaluated = typing._eval_type(original_value, globalns, localns)
if sys.version_info < (3, 9) and get_origin(original_evaluated) is Union:
# Union[str, None, "str"] is not reduced to Union[str, None]
original_evaluated = Union[original_evaluated.__args__]
# Compare if values differ. Note that even if equal
# value might be cached by typing._tp_cache contrary to original_evaluated
if original_evaluated != value or (
# 3.10: ForwardRefs of UnionType might be turned into _UnionGenericAlias
hasattr(_types, "UnionType")
and isinstance(original_evaluated, _types.UnionType)
and not isinstance(value, _types.UnionType)
):
hints[name] = original_evaluated
# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.8
else:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
f"{', '.join(repr(a) for a in self.__metadata__)}]")
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__, *self.__metadata__)
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type (and will be in
the __origin__ field), the remaining arguments are kept as a tuple in
the __extra__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
allowed_special_forms = (ClassVar, Final)
if get_origin(params[0]) in allowed_special_forms:
origin = params[0]
else:
msg = "Annotated[t, ...]: t must be a type."
origin = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
f"Cannot subclass {cls.__module__}.Annotated"
)
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.8-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__, *tp.__metadata__)
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
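# Editor's note: quick illustrative checks for the backported introspection
# helpers above (a sketch, assuming `from typing import List, Union, Callable`):
#
#     get_origin(List[int]) is list            # True
#     get_origin(Union[int, str]) is Union     # True
#     get_args(Union[int, str])                # (int, str)
#     get_args(Callable[[int], str])           # ([int], str)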
# 3.10+
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
# 3.8
else:
TypeAlias = _ExtensionsSpecialForm(
'TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above."""
)
def _set_default(type_param, default):
type_param.has_default = lambda: default is not NoDefault
type_param.__default__ = default
def _set_module(typevarlike):
# for pickling:
def_mod = _caller(depth=3)
if def_mod != 'typing_extensions':
typevarlike.__module__ = def_mod
class _DefaultMixin:
"""Mixin for TypeVarLike defaults."""
__slots__ = ()
__init__ = _set_default
# Classes using this metaclass must provide a _backported_typevarlike ClassVar
class _TypeVarLikeMeta(type):
def __instancecheck__(cls, __instance: Any) -> bool:
return isinstance(__instance, cls._backported_typevarlike)
if _PEP_696_IMPLEMENTED:
from typing import TypeVar
else:
# Add default and infer_variance parameters from PEP 696 and 695
class TypeVar(metaclass=_TypeVarLikeMeta):
"""Type variable."""
_backported_typevarlike = typing.TypeVar
def __new__(cls, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=NoDefault, infer_variance=False):
if hasattr(typing, "TypeAliasType"):
# PEP 695 implemented (3.12+), can pass infer_variance to typing.TypeVar
typevar = typing.TypeVar(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant,
infer_variance=infer_variance)
else:
typevar = typing.TypeVar(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant)
if infer_variance and (covariant or contravariant):
raise ValueError("Variance cannot be specified with infer_variance.")
typevar.__infer_variance__ = infer_variance
_set_default(typevar, default)
_set_module(typevar)
def _tvar_prepare_subst(alias, args):
if (
typevar.has_default()
and alias.__parameters__.index(typevar) == len(args)
):
args += (typevar.__default__,)
return args
typevar.__typing_prepare_subst__ = _tvar_prepare_subst
return typevar
def __init_subclass__(cls) -> None:
raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type")
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.8-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
def __eq__(self, other):
if not isinstance(other, ParamSpecArgs):
return NotImplemented
return self.__origin__ == other.__origin__
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
def __eq__(self, other):
if not isinstance(other, ParamSpecKwargs):
return NotImplemented
return self.__origin__ == other.__origin__
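# Editor's sketch: runtime introspection of the ParamSpec components defined
# above (hypothetical name `P`; ParamSpec itself is defined just below):
#
#     P = ParamSpec("P")
#     P.args.__origin__ is P        # True
#     P.kwargs.__origin__ is P      # True
#     repr(P.kwargs)                # 'P.kwargs'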
if _PEP_696_IMPLEMENTED:
from typing import ParamSpec
# 3.10+
elif hasattr(typing, 'ParamSpec'):
# Add default parameter - PEP 696
class ParamSpec(metaclass=_TypeVarLikeMeta):
"""Parameter specification."""
_backported_typevarlike = typing.ParamSpec
def __new__(cls, name, *, bound=None,
covariant=False, contravariant=False,
infer_variance=False, default=NoDefault):
if hasattr(typing, "TypeAliasType"):
# PEP 695 implemented, can pass infer_variance to typing.TypeVar
paramspec = typing.ParamSpec(name, bound=bound,
covariant=covariant,
contravariant=contravariant,
infer_variance=infer_variance)
else:
paramspec = typing.ParamSpec(name, bound=bound,
covariant=covariant,
contravariant=contravariant)
paramspec.__infer_variance__ = infer_variance
_set_default(paramspec, default)
_set_module(paramspec)
def _paramspec_prepare_subst(alias, args):
params = alias.__parameters__
i = params.index(paramspec)
if i == len(args) and paramspec.has_default():
args = [*args, paramspec.__default__]
if i >= len(args):
raise TypeError(f"Too few arguments for {alias}")
# Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
if len(params) == 1 and not typing._is_param_expr(args[0]):
assert i == 0
args = (args,)
# Convert lists to tuples to help other libraries cache the results.
elif isinstance(args[i], list):
args = (*args[:i], tuple(args[i]), *args[i + 1:])
return args
paramspec.__typing_prepare_subst__ = _paramspec_prepare_subst
return paramspec
def __init_subclass__(cls) -> None:
raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type")
# 3.8-3.9
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list, _DefaultMixin):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or as the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
infer_variance=False, default=NoDefault):
list.__init__(self, [self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
self.__infer_variance__ = bool(infer_variance)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
_DefaultMixin.__init__(self, default)
# for pickling:
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__infer_variance__:
prefix = ''
elif self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
# 3.8-3.9
if not hasattr(typing, 'Concatenate'):
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
# 3.9.0-1
if not hasattr(typing, '_type_convert'):
def _type_convert(arg, module=None, *, allow_special_forms=False):
"""For converting None to type(None), and strings to ForwardRef."""
if arg is None:
return type(None)
if isinstance(arg, str):
if sys.version_info <= (3, 9, 6):
return ForwardRef(arg)
if sys.version_info <= (3, 9, 7):
return ForwardRef(arg, module=module)
return ForwardRef(arg, module=module, is_class=allow_special_forms)
return arg
else:
_type_convert = typing._type_convert
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
__class__ = typing._GenericAlias
# Flag in 3.8.
_special = False
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return (f'{_type_repr(self.__origin__)}'
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
)
# 3.8; needed for typing._subst_tvars
# 3.9 used by __getitem__ below
def copy_with(self, params):
if isinstance(params[-1], _ConcatenateGenericAlias):
params = (*params[:-1], *params[-1].__args__)
elif isinstance(params[-1], (list, tuple)):
return (*params[:-1], *params[-1])
elif (not (params[-1] is ... or isinstance(params[-1], ParamSpec))):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable or ellipsis.")
return self.__class__(self.__origin__, params)
# 3.9; accessed during GenericAlias.__getitem__ when substituting
def __getitem__(self, args):
if self.__origin__ in (Generic, Protocol):
# Can't subscript Generic[...] or Protocol[...].
raise TypeError(f"Cannot subscript already-subscripted {self}")
if not self.__parameters__:
raise TypeError(f"{self} is not a generic class")
if not isinstance(args, tuple):
args = (args,)
args = _unpack_args(*(_type_convert(p) for p in args))
params = self.__parameters__
for param in params:
prepare = getattr(param, "__typing_prepare_subst__", None)
if prepare is not None:
args = prepare(self, args)
# 3.8 - 3.9 & typing.ParamSpec
elif isinstance(param, ParamSpec):
i = params.index(param)
if (
i == len(args)
and getattr(param, '__default__', NoDefault) is not NoDefault
):
args = [*args, param.__default__]
if i >= len(args):
raise TypeError(f"Too few arguments for {self}")
# Special case for Z[[int, str, bool]] == Z[int, str, bool]
if len(params) == 1 and not _is_param_expr(args[0]):
assert i == 0
args = (args,)
elif (
isinstance(args[i], list)
# 3.8 - 3.9
# This class inherits from list; do not convert
and not isinstance(args[i], _ConcatenateGenericAlias)
):
args = (*args[:i], tuple(args[i]), *args[i + 1:])
alen = len(args)
plen = len(params)
if alen != plen:
raise TypeError(
f"Too {'many' if alen > plen else 'few'} arguments for {self};"
f" actual {alen}, expected {plen}"
)
subst = dict(zip(self.__parameters__, args))
# determine new args
new_args = []
for arg in self.__args__:
if isinstance(arg, type):
new_args.append(arg)
continue
if isinstance(arg, TypeVar):
arg = subst[arg]
if (
(isinstance(arg, typing._GenericAlias) and _is_unpack(arg))
or (
hasattr(_types, "GenericAlias")
and isinstance(arg, _types.GenericAlias)
and getattr(arg, "__unpacked__", False)
)
):
raise TypeError(f"{arg} is not valid as type argument")
elif isinstance(arg,
typing._GenericAlias
if not hasattr(_types, "GenericAlias") else
(typing._GenericAlias, _types.GenericAlias)
):
subparams = arg.__parameters__
if subparams:
subargs = tuple(subst[x] for x in subparams)
arg = arg[subargs]
new_args.append(arg)
return self.copy_with(tuple(new_args))
# 3.10+
else:
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias
# 3.10
if sys.version_info < (3, 11):
class _ConcatenateGenericAlias(typing._ConcatenateGenericAlias, _root=True):
# needed for checks in collections.abc.Callable to accept this class
__module__ = "typing"
def copy_with(self, params):
if isinstance(params[-1], (list, tuple)):
return (*params[:-1], *params[-1])
if isinstance(params[-1], typing._ConcatenateGenericAlias):
params = (*params[:-1], *params[-1].__args__)
elif not (params[-1] is ... or isinstance(params[-1], ParamSpec)):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable or ellipsis.")
return super(typing._ConcatenateGenericAlias, self).copy_with(params)
def __getitem__(self, args):
value = super().__getitem__(args)
if isinstance(value, tuple) and any(_is_unpack(t) for t in value):
return tuple(_unpack_args(*(n for n in value)))
return value
# 3.8-3.9.2
class _EllipsisDummy: ...
# 3.8-3.10
def _create_concatenate_alias(origin, parameters):
if parameters[-1] is ... and sys.version_info < (3, 9, 2):
# Hack: Arguments must be types, replace it with one.
parameters = (*parameters[:-1], _EllipsisDummy)
if sys.version_info >= (3, 10, 3):
concatenate = _ConcatenateGenericAlias(origin, parameters,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
else:
concatenate = _ConcatenateGenericAlias(origin, parameters)
if parameters[-1] is not _EllipsisDummy:
return concatenate
# Remove dummy again
concatenate.__args__ = tuple(p if p is not _EllipsisDummy else ...
for p in concatenate.__args__)
if sys.version_info < (3, 10):
# backport needs __args__ adjustment only
return concatenate
concatenate.__parameters__ = tuple(p for p in concatenate.__parameters__
if p is not _EllipsisDummy)
return concatenate
# 3.8-3.10
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable or ellipsis.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = (*(typing._type_check(p, msg) for p in parameters[:-1]),
parameters[-1])
return _create_concatenate_alias(self, parameters)
# 3.11+; Concatenate does not accept ellipsis in 3.10
if sys.version_info >= (3, 11):
Concatenate = typing.Concatenate
# 3.9-3.10
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.8
else:
class _ConcatenateForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.8
else:
class _TypeGuardForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# 3.13+
if hasattr(typing, 'TypeIs'):
TypeIs = typing.TypeIs
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeIs(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type narrower function. ``TypeIs`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeIs[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeIs`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the intersection of the type inside ``TypeIs`` and the argument's
previously known type.
For example::
def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
return hasattr(val, '__await__')
def f(val: Union[int, Awaitable[int]]) -> int:
if is_awaitable(val):
assert_type(val, Awaitable[int])
else:
assert_type(val, int)
``TypeIs`` also works with type variables. For more information, see
PEP 742 (Narrowing types with TypeIs).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.8
else:
class _TypeIsForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeIs = _TypeIsForm(
'TypeIs',
doc="""Special typing form used to annotate the return type of a user-defined
type narrower function. ``TypeIs`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeIs[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeIs`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the intersection of the type inside ``TypeIs`` and the argument's
previously known type.
For example::
def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
return hasattr(val, '__await__')
def f(val: Union[int, Awaitable[int]]) -> int:
if is_awaitable(val):
assert_type(val, Awaitable[int])
else:
assert_type(val, int)
``TypeIs`` also works with type variables. For more information, see
PEP 742 (Narrowing types with TypeIs).
""")
# 3.14+?
if hasattr(typing, 'TypeForm'):
TypeForm = typing.TypeForm
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeFormForm(_ExtensionsSpecialForm, _root=True):
# TypeForm(X) is equivalent to X but indicates to the type checker
# that the object is a TypeForm.
def __call__(self, obj, /):
return obj
@_TypeFormForm
def TypeForm(self, parameters):
"""A special form representing the value that results from the evaluation
of a type expression. This value encodes the information supplied in the
type expression, and it represents the type described by that type expression.
When used in a type expression, TypeForm describes a set of type form objects.
It accepts a single type argument, which must be a valid type expression.
``TypeForm[T]`` describes the set of all type form objects that represent
the type T or types that are assignable to T.
Usage:
def cast[T](typ: TypeForm[T], value: Any) -> T: ...
reveal_type(cast(int, "x")) # int
See PEP 747 for more information.
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.8
else:
class _TypeFormForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
def __call__(self, obj, /):
return obj
TypeForm = _TypeFormForm(
'TypeForm',
doc="""A special form representing the value that results from the evaluation
of a type expression. This value encodes the information supplied in the
type expression, and it represents the type described by that type expression.
When used in a type expression, TypeForm describes a set of type form objects.
It accepts a single type argument, which must be a valid type expression.
``TypeForm[T]`` describes the set of all type form objects that represent
the type T or types that are assignable to T.
Usage:
def cast[T](typ: TypeForm[T], value: Any) -> T: ...
reveal_type(cast(int, "x")) # int
See PEP 747 for more information.
""")
# Vendored from cpython typing._SpecialFrom
class _SpecialForm(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"): # 3.11+
LiteralString = typing.LiteralString
else:
@_SpecialForm
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"): # 3.11+
Self = typing.Self
else:
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"): # 3.11+
Never = typing.Never
else:
@_SpecialForm
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'): # 3.11+
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9): # 3.9-3.10
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if title is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if title is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else: # 3.8
class _RequiredForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if title is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if title is omitted
year=1999,
)
""")
if hasattr(typing, 'ReadOnly'):
ReadOnly = typing.ReadOnly
elif sys.version_info[:2] >= (3, 9): # 3.9-3.12
@_ExtensionsSpecialForm
def ReadOnly(self, parameters):
"""A special typing construct to mark an item of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992 # allowed
m["title"] = "The Matrix" # typechecker error
There is no runtime checking for this property.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else: # 3.8
class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
ReadOnly = _ReadOnlyForm(
'ReadOnly',
doc="""A special typing construct to mark a key of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992 # allowed
m["title"] = "The Matrix" # typechecker error
There is no runtime checking for this property.
""")
_UNPACK_DOC = """\
Type unpack operator.
The type unpack operator takes the child types from some container type,
such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For
example:
# For some generic class `Foo`:
Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str]
Ts = TypeVarTuple('Ts')
# Specifies that `Bar` is generic in an arbitrary number of types.
# (Think of `Ts` as a tuple of an arbitrary number of individual
# `TypeVar`s, which the `Unpack` is 'pulling out' directly into the
# `Generic[]`.)
class Bar(Generic[Unpack[Ts]]): ...
Bar[int] # Valid
Bar[int, str] # Also valid
From Python 3.11, this can also be done using the `*` operator:
Foo[*tuple[int, str]]
class Bar(Generic[*Ts]): ...
The operator can also be used along with a `TypedDict` to annotate
`**kwargs` in a function signature. For instance:
class Movie(TypedDict):
name: str
year: int
# This function expects two keyword arguments - *name* of type `str` and
# *year* of type `int`.
def foo(**kwargs: Unpack[Movie]): ...
Note that there is only some runtime checking of this operator. Not
everything the runtime allows may be accepted by static type checkers.
For more information, see PEP 646 and PEP 692.
"""
if sys.version_info >= (3, 12): # PEP 692 changed the repr of Unpack[]
Unpack = typing.Unpack
def _is_unpack(obj):
return get_origin(obj) is Unpack
elif sys.version_info[:2] >= (3, 9): # 3.9+
class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True):
def __init__(self, getitem):
super().__init__(getitem)
self.__doc__ = _UNPACK_DOC
class _UnpackAlias(typing._GenericAlias, _root=True):
if sys.version_info < (3, 11):
# needed for compatibility with Generic[Unpack[Ts]]
__class__ = typing.TypeVar
@property
def __typing_unpacked_tuple_args__(self):
assert self.__origin__ is Unpack
assert len(self.__args__) == 1
arg, = self.__args__
if isinstance(arg, (typing._GenericAlias, _types.GenericAlias)):
if arg.__origin__ is not tuple:
raise TypeError("Unpack[...] must be used with a tuple type")
return arg.__args__
return None
@property
def __typing_is_unpacked_typevartuple__(self):
assert self.__origin__ is Unpack
assert len(self.__args__) == 1
return isinstance(self.__args__[0], TypeVarTuple)
def __getitem__(self, args):
if self.__typing_is_unpacked_typevartuple__:
return args
return super().__getitem__(args)
@_UnpackSpecialForm
def Unpack(self, parameters):
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else: # 3.8
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
@property
def __typing_unpacked_tuple_args__(self):
assert self.__origin__ is Unpack
assert len(self.__args__) == 1
arg, = self.__args__
if isinstance(arg, typing._GenericAlias):
if arg.__origin__ is not tuple:
raise TypeError("Unpack[...] must be used with a tuple type")
return arg.__args__
return None
@property
def __typing_is_unpacked_typevartuple__(self):
assert self.__origin__ is Unpack
assert len(self.__args__) == 1
return isinstance(self.__args__[0], TypeVarTuple)
def __getitem__(self, args):
if self.__typing_is_unpacked_typevartuple__:
return args
return super().__getitem__(args)
class _UnpackForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC)
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
def _unpack_args(*args):
newargs = []
for arg in args:
subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
if subargs is not None and (not (subargs and subargs[-1] is ...)):
newargs.extend(subargs)
else:
newargs.append(arg)
return newargs
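# Editor's note: _unpack_args flattens Unpack of *fixed-length* tuple types
# into their element types and leaves everything else (including unpacked
# arbitrary-length tuples) untouched. Illustrative sketch:
#
#     _unpack_args(Unpack[Tuple[int, str]], float)   # [int, str, float]
#     _unpack_args(Unpack[Tuple[int, ...]])          # [Unpack[Tuple[int, ...]]]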
if _PEP_696_IMPLEMENTED:
from typing import TypeVarTuple
elif hasattr(typing, "TypeVarTuple"): # 3.11+
# Add default parameter - PEP 696
class TypeVarTuple(metaclass=_TypeVarLikeMeta):
"""Type variable tuple."""
_backported_typevarlike = typing.TypeVarTuple
def __new__(cls, name, *, default=NoDefault):
tvt = typing.TypeVarTuple(name)
_set_default(tvt, default)
_set_module(tvt)
def _typevartuple_prepare_subst(alias, args):
params = alias.__parameters__
typevartuple_index = params.index(tvt)
for param in params[typevartuple_index + 1:]:
if isinstance(param, TypeVarTuple):
raise TypeError(
f"More than one TypeVarTuple parameter in {alias}"
)
alen = len(args)
plen = len(params)
left = typevartuple_index
right = plen - typevartuple_index - 1
var_tuple_index = None
fillarg = None
for k, arg in enumerate(args):
if not isinstance(arg, type):
subargs = getattr(arg, '__typing_unpacked_tuple_args__', None)
if subargs and len(subargs) == 2 and subargs[-1] is ...:
if var_tuple_index is not None:
raise TypeError(
"More than one unpacked "
"arbitrary-length tuple argument"
)
var_tuple_index = k
fillarg = subargs[0]
if var_tuple_index is not None:
left = min(left, var_tuple_index)
right = min(right, alen - var_tuple_index - 1)
elif left + right > alen:
raise TypeError(f"Too few arguments for {alias};"
f" actual {alen}, expected at least {plen - 1}")
if left == alen - right and tvt.has_default():
replacement = _unpack_args(tvt.__default__)
else:
replacement = args[left: alen - right]
return (
*args[:left],
*([fillarg] * (typevartuple_index - left)),
replacement,
*([fillarg] * (plen - right - left - typevartuple_index - 1)),
*args[alen - right:],
)
tvt.__typing_prepare_subst__ = _typevartuple_prepare_subst
return tvt
def __init_subclass__(self, *args, **kwds):
raise TypeError("Cannot subclass special typing classes")
else: # <=3.10
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=NoDefault):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"): # 3.11+
reveal_type = typing.reveal_type
else: # <=3.10
def reveal_type(obj: T, /) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
return obj
if hasattr(typing, "_ASSERT_NEVER_REPR_MAX_LENGTH"): # 3.11+
_ASSERT_NEVER_REPR_MAX_LENGTH = typing._ASSERT_NEVER_REPR_MAX_LENGTH
else: # <=3.10
_ASSERT_NEVER_REPR_MAX_LENGTH = 100
if hasattr(typing, "assert_never"): # 3.11+
assert_never = typing.assert_never
else: # <=3.10
def assert_never(arg: Never, /) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
value = repr(arg)
if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH:
value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...'
raise AssertionError(f"Expected code to be unreachable, but got: {value}")
if sys.version_info >= (3, 12): # 3.12+
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
dataclass_transform = typing.dataclass_transform
else: # <=3.11
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
frozen_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``frozen_default`` indicates whether the ``frozen`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"frozen_default": frozen_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
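# Editor's sketch: at runtime the backported decorator only records its
# arguments, which can then be introspected (hypothetical names):
#
#     @dataclass_transform(kw_only_default=True)
#     def create_model(cls):
#         return cls
#
#     create_model.__dataclass_transform__["kw_only_default"]   # True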
if hasattr(typing, "override"): # 3.12+
override = typing.override
else: # <=3.11
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
def override(arg: _F, /) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None:
pass
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
There is no runtime checking of these properties. The decorator
sets the ``__override__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
See PEP 698 for details.
"""
try:
arg.__override__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return arg
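# Editor's note: @override is advisory; at runtime it only sets a marker
# attribute when the object allows it (hypothetical sketch, `Base` is
# illustrative):
#
#     class Child(Base):
#         @override
#         def method(self) -> None: ...
#
#     Child.method.__override__   # True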
# Python 3.13.3+ contains a fix for the wrapped __new__
if sys.version_info >= (3, 13, 3):
deprecated = warnings.deprecated
else:
_T = typing.TypeVar("_T")
class deprecated:
"""Indicate that a class, function or overload is deprecated.
When this decorator is applied to an object, the type checker
will generate a diagnostic on usage of the deprecated object.
Usage:
@deprecated("Use B instead")
class A:
pass
@deprecated("Use g instead")
def f():
pass
@overload
@deprecated("int support is deprecated")
def g(x: int) -> int: ...
@overload
def g(x: str) -> int: ...
The warning specified by *category* will be emitted at runtime
on use of deprecated objects. For functions, that happens on calls;
for classes, on instantiation and on creation of subclasses.
If the *category* is ``None``, no warning is emitted at runtime.
The *stacklevel* determines where the
warning is emitted. If it is ``1`` (the default), the warning
is emitted at the direct caller of the deprecated object; if it
is higher, it is emitted further up the stack.
Static type checker behavior is not affected by the *category*
and *stacklevel* arguments.
The deprecation message passed to the decorator is saved in the
``__deprecated__`` attribute on the decorated object.
If applied to an overload, the decorator
must be after the ``@overload`` decorator for the attribute to
exist on the overload as returned by ``get_overloads()``.
See PEP 702 for details.
"""
def __init__(
self,
message: str,
/,
*,
category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
stacklevel: int = 1,
) -> None:
if not isinstance(message, str):
raise TypeError(
"Expected an object of type str for 'message', not "
f"{type(message).__name__!r}"
)
self.message = message
self.category = category
self.stacklevel = stacklevel
def __call__(self, arg: _T, /) -> _T:
# Make sure the inner functions created below don't
# retain a reference to self.
msg = self.message
category = self.category
stacklevel = self.stacklevel
if category is None:
arg.__deprecated__ = msg
return arg
elif isinstance(arg, type):
import functools
from types import MethodType
original_new = arg.__new__
@functools.wraps(original_new)
def __new__(cls, /, *args, **kwargs):
if cls is arg:
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
if original_new is not object.__new__:
return original_new(cls, *args, **kwargs)
# Mirrors a similar check in object.__new__.
elif cls.__init__ is object.__init__ and (args or kwargs):
raise TypeError(f"{cls.__name__}() takes no arguments")
else:
return original_new(cls)
arg.__new__ = staticmethod(__new__)
original_init_subclass = arg.__init_subclass__
# We need slightly different behavior if __init_subclass__
# is a bound method (likely if it was implemented in Python)
if isinstance(original_init_subclass, MethodType):
original_init_subclass = original_init_subclass.__func__
@functools.wraps(original_init_subclass)
def __init_subclass__(*args, **kwargs):
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
return original_init_subclass(*args, **kwargs)
arg.__init_subclass__ = classmethod(__init_subclass__)
# Or otherwise, which likely means it's a builtin such as
# object's implementation of __init_subclass__.
else:
@functools.wraps(original_init_subclass)
def __init_subclass__(*args, **kwargs):
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
return original_init_subclass(*args, **kwargs)
arg.__init_subclass__ = __init_subclass__
arg.__deprecated__ = __new__.__deprecated__ = msg
__init_subclass__.__deprecated__ = msg
return arg
elif callable(arg):
import asyncio.coroutines
import functools
import inspect
@functools.wraps(arg)
def wrapper(*args, **kwargs):
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
return arg(*args, **kwargs)
if asyncio.coroutines.iscoroutinefunction(arg):
if sys.version_info >= (3, 12):
wrapper = inspect.markcoroutinefunction(wrapper)
else:
wrapper._is_coroutine = asyncio.coroutines._is_coroutine
arg.__deprecated__ = wrapper.__deprecated__ = msg
return wrapper
else:
raise TypeError(
"@deprecated decorator with non-None category must be applied to "
f"a class or callable, not {arg!r}"
)
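# Editor's sketch of the runtime behaviour of @deprecated (hypothetical
# function name; the warning category defaults to DeprecationWarning):
#
#     @deprecated("Use g instead")
#     def f(): ...
#
#     f()                 # emits DeprecationWarning("Use g instead")
#     f.__deprecated__    # 'Use g instead'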
if sys.version_info < (3, 10):
def _is_param_expr(arg):
return arg is ... or isinstance(
arg, (tuple, list, ParamSpec, _ConcatenateGenericAlias)
)
else:
def _is_param_expr(arg):
return arg is ... or isinstance(
arg,
(
tuple,
list,
ParamSpec,
_ConcatenateGenericAlias,
typing._ConcatenateGenericAlias,
),
)
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
def _check_generic(cls, parameters, elen=_marker):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
# If substituting a single ParamSpec with multiple arguments
# we do not check the count
if (inspect.isclass(cls) and issubclass(cls, typing.Generic)
and len(cls.__parameters__) == 1
and isinstance(cls.__parameters__[0], ParamSpec)
and parameters
and not _is_param_expr(parameters[0])
):
# Generic modifies parameters variable, but here we cannot do this
return
if not elen:
raise TypeError(f"{cls} is not a generic class")
if elen is _marker:
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
elen = len(cls.__parameters__)
alen = len(parameters)
if alen != elen:
expect_val = elen
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
return
# deal with TypeVarLike defaults
# required TypeVarLikes cannot appear after a defaulted one.
if alen < elen:
# since we validate TypeVarLike default in _collect_type_vars
# or _collect_parameters we can safely check parameters[alen]
if (
getattr(parameters[alen], '__default__', NoDefault)
is not NoDefault
):
return
num_default_tv = sum(getattr(p, '__default__', NoDefault)
is not NoDefault for p in parameters)
elen -= num_default_tv
expect_val = f"at least {elen}"
things = "arguments" if sys.version_info >= (3, 10) else "parameters"
raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}"
f" for {cls}; actual {alen}, expected {expect_val}")
else:
# Python 3.11+
def _check_generic(cls, parameters, elen):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
if alen != elen:
expect_val = elen
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
# deal with TypeVarLike defaults
# required TypeVarLikes cannot appear after a defaulted one.
if alen < elen:
# since we validate TypeVarLike default in _collect_type_vars
# or _collect_parameters we can safely check parameters[alen]
if (
getattr(parameters[alen], '__default__', NoDefault)
is not NoDefault
):
return
num_default_tv = sum(getattr(p, '__default__', NoDefault)
is not NoDefault for p in parameters)
elen -= num_default_tv
expect_val = f"at least {elen}"
raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments"
f" for {cls}; actual {alen}, expected {expect_val}")
if not _PEP_696_IMPLEMENTED:
typing._check_generic = _check_generic
def _has_generic_or_protocol_as_origin() -> bool:
try:
frame = sys._getframe(2)
# - Catch AttributeError: not all Python implementations have sys._getframe()
# - Catch ValueError: maybe we're called from an unexpected module
# and the call stack isn't deep enough
except (AttributeError, ValueError):
return False # err on the side of leniency
else:
# If we somehow get invoked from outside typing.py,
# also err on the side of leniency
if frame.f_globals.get("__name__") != "typing":
return False
origin = frame.f_locals.get("origin")
# Cannot use "in" because origin may be an object with a buggy __eq__ that
# throws an error.
return origin is typing.Generic or origin is Protocol or origin is typing.Protocol
_TYPEVARTUPLE_TYPES = {TypeVarTuple, getattr(typing, "TypeVarTuple", None)}
def _is_unpacked_typevartuple(x) -> bool:
if get_origin(x) is not Unpack:
return False
args = get_args(x)
return (
bool(args)
and len(args) == 1
and type(args[0]) in _TYPEVARTUPLE_TYPES
)
# Python 3.11+ _collect_type_vars was renamed to _collect_parameters
if hasattr(typing, '_collect_type_vars'):
def _collect_type_vars(types, typevar_types=None):
"""Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
# A required TypeVarLike cannot appear after a TypeVarLike with a default
# if it was a direct call to `Generic[]` or `Protocol[]`
enforce_default_ordering = _has_generic_or_protocol_as_origin()
default_encountered = False
# Also, a TypeVarLike with a default cannot appear after a TypeVarTuple
type_var_tuple_encountered = False
for t in types:
if _is_unpacked_typevartuple(t):
type_var_tuple_encountered = True
elif (
isinstance(t, typevar_types) and not isinstance(t, _UnpackAlias)
and t not in tvars
):
if enforce_default_ordering:
has_default = getattr(t, '__default__', NoDefault) is not NoDefault
if has_default:
if type_var_tuple_encountered:
raise TypeError('Type parameter with a default'
' follows TypeVarTuple')
default_encountered = True
elif default_encountered:
raise TypeError(f'Type parameter {t!r} without a default'
' follows type parameter with a default')
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
elif isinstance(t, tuple):
# Collect nested type_vars
# tuple wrapped by _prepare_paramspec_params(cls, params)
for x in t:
for collected in _collect_type_vars([x]):
if collected not in tvars:
tvars.append(collected)
return tuple(tvars)
typing._collect_type_vars = _collect_type_vars
else:
def _collect_parameters(args):
"""Collect all type variables and parameter specifications in args
in order of first appearance (lexicographic order).
For example::
assert _collect_parameters((T, Callable[P, T])) == (T, P)
"""
parameters = []
# A required TypeVarLike cannot appear after a TypeVarLike with default
# if it was a direct call to `Generic[]` or `Protocol[]`
enforce_default_ordering = _has_generic_or_protocol_as_origin()
default_encountered = False
# Also, a TypeVarLike with a default cannot appear after a TypeVarTuple
type_var_tuple_encountered = False
for t in args:
if isinstance(t, type):
# We don't want __parameters__ descriptor of a bare Python class.
pass
elif isinstance(t, tuple):
# `t` might be a tuple, when `ParamSpec` is substituted with
# `[T, int]`, or `[int, *Ts]`, etc.
for x in t:
for collected in _collect_parameters([x]):
if collected not in parameters:
parameters.append(collected)
elif hasattr(t, '__typing_subst__'):
if t not in parameters:
if enforce_default_ordering:
has_default = (
getattr(t, '__default__', NoDefault) is not NoDefault
)
if type_var_tuple_encountered and has_default:
raise TypeError('Type parameter with a default'
' follows TypeVarTuple')
if has_default:
default_encountered = True
elif default_encountered:
raise TypeError(f'Type parameter {t!r} without a default'
' follows type parameter with a default')
parameters.append(t)
else:
if _is_unpacked_typevartuple(t):
type_var_tuple_encountered = True
for x in getattr(t, '__parameters__', ()):
if x not in parameters:
parameters.append(x)
return tuple(parameters)
if not _PEP_696_IMPLEMENTED:
typing._collect_parameters = _collect_parameters
# Backport typing.NamedTuple as it exists in Python 3.13.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
# On 3.12, we added __orig_bases__ to call-based NamedTuples
# On 3.13, we deprecated kwargs-based NamedTuples
if sys.version_info >= (3, 13):
NamedTuple = typing.NamedTuple
else:
def _make_nmtuple(name, types, module, defaults=()):
fields = [n for n, t in types]
annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
# The `_field_types` attribute was removed in 3.9;
# in earlier versions, it is the same as the `__annotations__` attribute
if sys.version_info < (3, 9):
nm_tpl._field_types = annotations
return nm_tpl
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
class _NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert _NamedTuple in bases
for base in bases:
if base is not _NamedTuple and base is not typing.Generic:
raise TypeError(
'can only inherit from a NamedTuple type and Generic')
bases = tuple(tuple if base is _NamedTuple else base for base in bases)
if "__annotations__" in ns:
types = ns["__annotations__"]
elif "__annotate__" in ns:
# TODO: Use inspect.VALUE here, and make the annotations lazily evaluated
types = ns["__annotate__"](1)
else:
types = {}
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(
typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__']
)
nm_tpl.__bases__ = bases
if typing.Generic in bases:
if hasattr(typing, '_generic_class_getitem'): # 3.12+
nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem)
else:
class_getitem = typing.Generic.__class_getitem__.__func__
nm_tpl.__class_getitem__ = classmethod(class_getitem)
# update from user namespace without overriding special namedtuple attributes
for key, val in ns.items():
if key in _prohibited_namedtuple_fields:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special_namedtuple_fields:
if key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
try:
set_name = type(val).__set_name__
except AttributeError:
pass
else:
try:
set_name(val, nm_tpl, key)
except BaseException as e:
msg = (
f"Error calling __set_name__ on {type(val).__name__!r} "
f"instance {key!r} in {typename!r}"
)
# BaseException.add_note() existed on py311,
# but the __set_name__ machinery didn't start
# using add_note() until py312.
# Making sure exceptions are raised in the same way
# as in "normal" classes seems most important here.
if sys.version_info >= (3, 12):
e.add_note(msg)
raise
else:
raise RuntimeError(msg) from e
if typing.Generic in bases:
nm_tpl.__init_subclass__()
return nm_tpl
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
def _namedtuple_mro_entries(bases):
assert NamedTuple in bases
return (_NamedTuple,)
@_ensure_subclassable(_namedtuple_mro_entries)
def NamedTuple(typename, fields=_marker, /, **kwargs):
"""Typed version of namedtuple.
Usage::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
An alternative equivalent functional syntax is also accepted::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is _marker:
if kwargs:
deprecated_thing = "Creating NamedTuple classes using keyword arguments"
deprecation_msg = (
"{name} is deprecated and will be disallowed in Python {remove}. "
"Use the class-based or functional syntax instead."
)
else:
deprecated_thing = "Failing to pass a value for the 'fields' parameter"
example = f"`{typename} = NamedTuple({typename!r}, [])`"
deprecation_msg = (
"{name} is deprecated and will be disallowed in Python {remove}. "
"To create a NamedTuple class with 0 fields "
"using the functional syntax, "
"pass an empty list, e.g. "
) + example + "."
elif fields is None:
if kwargs:
raise TypeError(
"Cannot pass `None` as the 'fields' parameter "
"and also specify fields using keyword arguments"
)
else:
deprecated_thing = "Passing `None` as the 'fields' parameter"
example = f"`{typename} = NamedTuple({typename!r}, [])`"
deprecation_msg = (
"{name} is deprecated and will be disallowed in Python {remove}. "
"To create a NamedTuple class with 0 fields "
"using the functional syntax, "
"pass an empty list, e.g. "
) + example + "."
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
if fields is _marker or fields is None:
warnings.warn(
deprecation_msg.format(name=deprecated_thing, remove="3.15"),
DeprecationWarning,
stacklevel=2,
)
fields = kwargs.items()
nt = _make_nmtuple(typename, fields, module=_caller())
nt.__orig_bases__ = (NamedTuple,)
return nt
if hasattr(collections.abc, "Buffer"):
Buffer = collections.abc.Buffer
else:
class Buffer(abc.ABC): # noqa: B024
"""Base class for classes that implement the buffer protocol.
The buffer protocol allows Python objects to expose a low-level
memory buffer interface. Before Python 3.12, it is not possible
to implement the buffer protocol in pure Python code, or even
to check whether a class implements the buffer protocol. In
Python 3.12 and higher, the ``__buffer__`` method allows access
to the buffer protocol from Python code, and the
``collections.abc.Buffer`` ABC allows checking whether a class
implements the buffer protocol.
To indicate support for the buffer protocol in earlier versions,
inherit from this ABC, either in a stub file or at runtime,
or use ABC registration. This ABC provides no methods, because
there are no Python-accessible methods shared by pre-3.12 buffer
classes. It is useful primarily for static checks.
"""
# As a courtesy, register the most common stdlib buffer classes.
Buffer.register(memoryview)
Buffer.register(bytearray)
Buffer.register(bytes)
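# A minimal usage sketch (illustrative only): with the registrations above,
# isinstance() recognizes the common stdlib buffer types; other buffer-protocol
# classes must subclass Buffer or be registered explicitly.
#
#     from typing_extensions import Buffer
#
#     def hexdump(data: Buffer) -> str:
#         return memoryview(data).tobytes().hex()
#
#     isinstance(b"abc", Buffer)        # True
#     hexdump(bytearray(b"\x00\xff"))   # '00ff'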
# Backport of types.get_original_bases, available on 3.12+ in CPython
if hasattr(_types, "get_original_bases"):
get_original_bases = _types.get_original_bases
else:
def get_original_bases(cls, /):
"""Return the class's "original" bases prior to modification by `__mro_entries__`.
Examples::
from typing import TypeVar, Generic
from typing_extensions import NamedTuple, TypedDict
T = TypeVar("T")
class Foo(Generic[T]): ...
class Bar(Foo[int], float): ...
class Baz(list[str]): ...
Eggs = NamedTuple("Eggs", [("a", int), ("b", str)])
Spam = TypedDict("Spam", {"a": int, "b": str})
assert get_original_bases(Bar) == (Foo[int], float)
assert get_original_bases(Baz) == (list[str],)
assert get_original_bases(Eggs) == (NamedTuple,)
assert get_original_bases(Spam) == (TypedDict,)
assert get_original_bases(int) == (object,)
"""
try:
return cls.__dict__.get("__orig_bases__", cls.__bases__)
except AttributeError:
raise TypeError(
f'Expected an instance of type, not {type(cls).__name__!r}'
) from None
# NewType is a class on Python 3.10+, making it pickleable
# The error message for subclassing instances of NewType was improved on 3.11+
if sys.version_info >= (3, 11):
NewType = typing.NewType
else:
class NewType:
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy callable that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
def __call__(self, obj, /):
return obj
def __init__(self, name, tp):
self.__qualname__ = name
if '.' in name:
name = name.rpartition('.')[-1]
self.__name__ = name
self.__supertype__ = tp
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __mro_entries__(self, bases):
# We defined __mro_entries__ to get a better error message
# if a user attempts to subclass a NewType instance. bpo-46170
supercls_name = self.__name__
class Dummy:
def __init_subclass__(cls):
subcls_name = cls.__name__
raise TypeError(
f"Cannot subclass an instance of NewType. "
f"Perhaps you were looking for: "
f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`"
)
return (Dummy,)
def __repr__(self):
return f'{self.__module__}.{self.__qualname__}'
def __reduce__(self):
return self.__qualname__
if sys.version_info >= (3, 10):
# PEP 604 methods
# It doesn't make sense to have these methods on Python <3.10
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
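# A brief sketch of the PEP 604 methods above: on Python 3.10+, instances of
# this backported NewType can participate in `|` unions.
#
#     from typing_extensions import NewType
#
#     UserId = NewType('UserId', int)
#     MaybeUserId = UserId | None   # equivalent to typing.Optional[UserId]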
if sys.version_info >= (3, 14):
TypeAliasType = typing.TypeAliasType
# 3.8-3.13
else:
if sys.version_info >= (3, 12):
# 3.12-3.14
def _is_unionable(obj):
"""Corresponds to is_unionable() in unionobject.c in CPython."""
return obj is None or isinstance(obj, (
type,
_types.GenericAlias,
_types.UnionType,
typing.TypeAliasType,
TypeAliasType,
))
else:
# 3.8-3.11
def _is_unionable(obj):
"""Corresponds to is_unionable() in unionobject.c in CPython."""
return obj is None or isinstance(obj, (
type,
_types.GenericAlias,
_types.UnionType,
TypeAliasType,
))
if sys.version_info < (3, 10):
# Copied and pasted from https://github.com/python/cpython/blob/986a4e1b6fcae7fe7a1d0a26aea446107dd58dd2/Objects/genericaliasobject.c#L568-L582,
# so that we emulate the behaviour of `types.GenericAlias`
# on the latest versions of CPython
_ATTRIBUTE_DELEGATION_EXCLUSIONS = frozenset({
"__class__",
"__bases__",
"__origin__",
"__args__",
"__unpacked__",
"__parameters__",
"__typing_unpacked_tuple_args__",
"__mro_entries__",
"__reduce_ex__",
"__reduce__",
"__copy__",
"__deepcopy__",
})
class _TypeAliasGenericAlias(typing._GenericAlias, _root=True):
def __getattr__(self, attr):
if attr in _ATTRIBUTE_DELEGATION_EXCLUSIONS:
return object.__getattr__(self, attr)
return getattr(self.__origin__, attr)
if sys.version_info < (3, 9):
def __getitem__(self, item):
result = super().__getitem__(item)
result.__class__ = type(self)
return result
class TypeAliasType:
"""Create named, parameterized type aliases.
This provides a backport of the new `type` statement in Python 3.12:
type ListOrSet[T] = list[T] | set[T]
is equivalent to:
T = TypeVar("T")
ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,))
The name ListOrSet can then be used as an alias for the type it refers to.
The type_params argument should contain all the type parameters used
in the value of the type alias. If the alias is not generic, this
argument is omitted.
Static type checkers should only support type aliases declared using
TypeAliasType that follow these rules:
- The first argument (the name) must be a string literal.
- The TypeAliasType instance must be immediately assigned to a variable
of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid,
as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)').
"""
def __init__(self, name: str, value, *, type_params=()):
if not isinstance(name, str):
raise TypeError("TypeAliasType name must be a string")
if not isinstance(type_params, tuple):
raise TypeError("type_params must be a tuple")
self.__value__ = value
self.__type_params__ = type_params
default_value_encountered = False
parameters = []
for type_param in type_params:
if (
not isinstance(type_param, (TypeVar, TypeVarTuple, ParamSpec))
# 3.8-3.11
# Unpack Backport passes isinstance(type_param, TypeVar)
or _is_unpack(type_param)
):
raise TypeError(f"Expected a type param, got {type_param!r}")
has_default = (
getattr(type_param, '__default__', NoDefault) is not NoDefault
)
if default_value_encountered and not has_default:
raise TypeError(f"non-default type parameter '{type_param!r}'"
" follows default type parameter")
if has_default:
default_value_encountered = True
if isinstance(type_param, TypeVarTuple):
parameters.extend(type_param)
else:
parameters.append(type_param)
self.__parameters__ = tuple(parameters)
def_mod = _caller()
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# Setting this attribute closes the TypeAliasType from further modification
self.__name__ = name
def __setattr__(self, name: str, value: object, /) -> None:
if hasattr(self, "__name__"):
self._raise_attribute_error(name)
super().__setattr__(name, value)
def __delattr__(self, name: str, /) -> Never:
self._raise_attribute_error(name)
def _raise_attribute_error(self, name: str) -> Never:
# Match the Python 3.12 error messages exactly
if name == "__name__":
raise AttributeError("readonly attribute")
elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}:
raise AttributeError(
f"attribute '{name}' of 'typing.TypeAliasType' objects "
"is not writable"
)
else:
raise AttributeError(
f"'typing.TypeAliasType' object has no attribute '{name}'"
)
def __repr__(self) -> str:
return self.__name__
if sys.version_info < (3, 11):
def _check_single_param(self, param, recursion=0):
# Allow [], [int], [int, str], [int, ...], [int, T]
if param is ...:
return ...
if param is None:
return None
# Note in <= 3.9 _ConcatenateGenericAlias inherits from list
if isinstance(param, list) and recursion == 0:
return [self._check_single_param(arg, recursion+1)
for arg in param]
return typing._type_check(
param, f'Subscripting {self.__name__} requires a type.'
)
def _check_parameters(self, parameters):
if sys.version_info < (3, 11):
return tuple(
self._check_single_param(item)
for item in parameters
)
return tuple(typing._type_check(
item, f'Subscripting {self.__name__} requires a type.'
)
for item in parameters
)
def __getitem__(self, parameters):
if not self.__type_params__:
raise TypeError("Only generic type aliases are subscriptable")
if not isinstance(parameters, tuple):
parameters = (parameters,)
# Using 3.9 here will create problems with Concatenate
if sys.version_info >= (3, 10):
return _types.GenericAlias(self, parameters)
type_vars = _collect_type_vars(parameters)
parameters = self._check_parameters(parameters)
alias = _TypeAliasGenericAlias(self, parameters)
# alias.__parameters__ is not complete if Concatenate is present
# as it is converted to a list from which no parameters are extracted.
if alias.__parameters__ != type_vars:
alias.__parameters__ = type_vars
return alias
def __reduce__(self):
return self.__name__
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
"type 'typing_extensions.TypeAliasType' is not an acceptable base type"
)
# The presence of this method convinces typing._type_check
# that TypeAliasTypes are types.
def __call__(self):
raise TypeError("Type alias is not callable")
if sys.version_info >= (3, 10):
def __or__(self, right):
# For forward compatibility with 3.12, reject Unions
# that are not accepted by the built-in Union.
if not _is_unionable(right):
return NotImplemented
return typing.Union[self, right]
def __ror__(self, left):
if not _is_unionable(left):
return NotImplemented
return typing.Union[left, self]
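# A minimal usage sketch of the backported TypeAliasType (names here are
# illustrative only):
#
#     from typing import List
#     from typing_extensions import TypeAliasType, TypeVar
#
#     T = TypeVar("T")
#     Vector = TypeAliasType("Vector", List[T], type_params=(T,))
#     Vector.__value__       # List[T]; the value is stored unevaluated
#     Vector[int]            # subscription yields a generic alias
#     Vector.__name__ = "V"  # AttributeError: readonly attribute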
if hasattr(typing, "is_protocol"):
is_protocol = typing.is_protocol
get_protocol_members = typing.get_protocol_members
else:
def is_protocol(tp: type, /) -> bool:
"""Return True if the given type is a Protocol.
Example::
>>> from typing_extensions import Protocol, is_protocol
>>> class P(Protocol):
... def a(self) -> str: ...
... b: int
>>> is_protocol(P)
True
>>> is_protocol(int)
False
"""
return (
isinstance(tp, type)
and getattr(tp, '_is_protocol', False)
and tp is not Protocol
and tp is not typing.Protocol
)
def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]:
"""Return the set of members defined in a Protocol.
Example::
>>> from typing_extensions import Protocol, get_protocol_members
>>> class P(Protocol):
... def a(self) -> str: ...
... b: int
>>> get_protocol_members(P)
frozenset({'a', 'b'})
Raise a TypeError for arguments that are not Protocols.
"""
if not is_protocol(tp):
raise TypeError(f'{tp!r} is not a Protocol')
if hasattr(tp, '__protocol_attrs__'):
return frozenset(tp.__protocol_attrs__)
return frozenset(_get_protocol_attrs(tp))
if hasattr(typing, "Doc"):
Doc = typing.Doc
else:
class Doc:
"""Define the documentation of a type annotation using ``Annotated``, to be
used in class attributes, function and method parameters, return values,
and variables.
The value should be a positional-only string literal to allow static tools
like editors and documentation generators to use it.
This complements docstrings.
The string value passed is available in the attribute ``documentation``.
Example::
>>> from typing_extensions import Annotated, Doc
>>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ...
"""
def __init__(self, documentation: str, /) -> None:
self.documentation = documentation
def __repr__(self) -> str:
return f"Doc({self.documentation!r})"
def __hash__(self) -> int:
return hash(self.documentation)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Doc):
return NotImplemented
return self.documentation == other.documentation
_CapsuleType = getattr(_types, "CapsuleType", None)
if _CapsuleType is None:
try:
import _socket
except ImportError:
pass
else:
_CAPI = getattr(_socket, "CAPI", None)
if _CAPI is not None:
_CapsuleType = type(_CAPI)
if _CapsuleType is not None:
CapsuleType = _CapsuleType
__all__.append("CapsuleType")
# Using this convoluted approach so that this keeps working
# whether we end up using PEP 649 as written, PEP 749, or
# some other variation: in any case, inspect.get_annotations
# will continue to exist and will gain a `format` parameter.
_PEP_649_OR_749_IMPLEMENTED = (
hasattr(inspect, 'get_annotations')
and inspect.get_annotations.__kwdefaults__ is not None
and "format" in inspect.get_annotations.__kwdefaults__
)
class Format(enum.IntEnum):
VALUE = 1
FORWARDREF = 2
STRING = 3
if _PEP_649_OR_749_IMPLEMENTED:
get_annotations = inspect.get_annotations
else:
def get_annotations(obj, *, globals=None, locals=None, eval_str=False,
format=Format.VALUE):
"""Compute the annotations dict for an object.
obj may be a callable, class, or module.
Passing in an object of any other type raises TypeError.
Returns a dict. get_annotations() returns a new dict every time
it's called; calling it twice on the same object will return two
different but equivalent dicts.
This is a backport of `inspect.get_annotations`, which has been
in the standard library since Python 3.10. See the standard library
documentation for more:
https://docs.python.org/3/library/inspect.html#inspect.get_annotations
This backport adds the *format* argument introduced by PEP 649. The
three formats supported are:
* VALUE: the annotations are returned as-is. This is the default and
it is compatible with the behavior on previous Python versions.
* FORWARDREF: return annotations as-is if possible, but replace any
undefined names with ForwardRef objects. The implementation proposed by
PEP 649 relies on language changes that cannot be backported; the
typing-extensions implementation simply returns the same result as VALUE.
* STRING: return annotations as strings, in a format close to the original
source. Again, this behavior cannot be replicated directly in a backport.
As an approximation, typing-extensions retrieves the annotations under
VALUE semantics and then stringifies them.
The purpose of this backport is to allow users who would like to use
FORWARDREF or STRING semantics once PEP 649 is implemented, but who also
want to support earlier Python versions, to simply write:
typing_extensions.get_annotations(obj, format=Format.FORWARDREF)
"""
format = Format(format)
if eval_str and format is not Format.VALUE:
raise ValueError("eval_str=True is only supported with format=Format.VALUE")
if isinstance(obj, type):
# class
obj_dict = getattr(obj, '__dict__', None)
if obj_dict and hasattr(obj_dict, 'get'):
ann = obj_dict.get('__annotations__', None)
if isinstance(ann, _types.GetSetDescriptorType):
ann = None
else:
ann = None
obj_globals = None
module_name = getattr(obj, '__module__', None)
if module_name:
module = sys.modules.get(module_name, None)
if module:
obj_globals = getattr(module, '__dict__', None)
obj_locals = dict(vars(obj))
unwrap = obj
elif isinstance(obj, _types.ModuleType):
# module
ann = getattr(obj, '__annotations__', None)
obj_globals = obj.__dict__
obj_locals = None
unwrap = None
elif callable(obj):
# this includes types.Function, types.BuiltinFunctionType,
# types.BuiltinMethodType, functools.partial, functools.singledispatch,
# "class funclike" from Lib/test/test_inspect... on and on it goes.
ann = getattr(obj, '__annotations__', None)
obj_globals = getattr(obj, '__globals__', None)
obj_locals = None
unwrap = obj
elif hasattr(obj, '__annotations__'):
ann = obj.__annotations__
obj_globals = obj_locals = unwrap = None
else:
raise TypeError(f"{obj!r} is not a module, class, or callable.")
if ann is None:
return {}
if not isinstance(ann, dict):
raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")
if not ann:
return {}
if not eval_str:
if format is Format.STRING:
return {
key: value if isinstance(value, str) else typing._type_repr(value)
for key, value in ann.items()
}
return dict(ann)
if unwrap is not None:
while True:
if hasattr(unwrap, '__wrapped__'):
unwrap = unwrap.__wrapped__
continue
if isinstance(unwrap, functools.partial):
unwrap = unwrap.func
continue
break
if hasattr(unwrap, "__globals__"):
obj_globals = unwrap.__globals__
if globals is None:
globals = obj_globals
if locals is None:
locals = obj_locals or {}
# "Inject" type parameters into the local namespace
# (unless they are shadowed by assignments *in* the local namespace),
# as a way of emulating annotation scopes when calling `eval()`
if type_params := getattr(obj, "__type_params__", ()):
locals = {param.__name__: param for param in type_params} | locals
return_value = {key:
value if not isinstance(value, str) else eval(value, globals, locals)
for key, value in ann.items() }
return return_value
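# A short sketch of the *format* parameter under this backport: FORWARDREF
# degrades to VALUE semantics, and STRING stringifies the VALUE result.
#
#     from typing_extensions import Format, get_annotations
#
#     def f(x: int, y: "NotDefinedYet") -> None: ...
#
#     get_annotations(f)
#     # {'x': int, 'y': 'NotDefinedYet', 'return': None}
#     get_annotations(f, format=Format.STRING)
#     # {'x': 'int', 'y': 'NotDefinedYet', 'return': 'None'}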
if hasattr(typing, "evaluate_forward_ref"):
evaluate_forward_ref = typing.evaluate_forward_ref
else:
# Implements annotationlib.ForwardRef.evaluate
def _eval_with_owner(
forward_ref, *, owner=None, globals=None, locals=None, type_params=None
):
if forward_ref.__forward_evaluated__:
return forward_ref.__forward_value__
if getattr(forward_ref, "__cell__", None) is not None:
try:
value = forward_ref.__cell__.cell_contents
except ValueError:
pass
else:
forward_ref.__forward_evaluated__ = True
forward_ref.__forward_value__ = value
return value
if owner is None:
owner = getattr(forward_ref, "__owner__", None)
if (
globals is None
and getattr(forward_ref, "__forward_module__", None) is not None
):
globals = getattr(
sys.modules.get(forward_ref.__forward_module__, None), "__dict__", None
)
if globals is None:
globals = getattr(forward_ref, "__globals__", None)
if globals is None:
if isinstance(owner, type):
module_name = getattr(owner, "__module__", None)
if module_name:
module = sys.modules.get(module_name, None)
if module:
globals = getattr(module, "__dict__", None)
elif isinstance(owner, _types.ModuleType):
globals = getattr(owner, "__dict__", None)
elif callable(owner):
globals = getattr(owner, "__globals__", None)
# If we pass None to eval() below, the globals of this module are used.
if globals is None:
globals = {}
if locals is None:
locals = {}
if isinstance(owner, type):
locals.update(vars(owner))
if type_params is None and owner is not None:
# "Inject" type parameters into the local namespace
# (unless they are shadowed by assignments *in* the local namespace),
# as a way of emulating annotation scopes when calling `eval()`
type_params = getattr(owner, "__type_params__", None)
# type parameters require some special handling,
# as they exist in their own scope
# but `eval()` does not have a dedicated parameter for that scope.
# For classes, names in type parameter scopes should override
# names in the global scope (which here are called `localns`!),
# but should in turn be overridden by names in the class scope
# (which here are called `globalns`!)
if type_params is not None:
globals = dict(globals)
locals = dict(locals)
for param in type_params:
param_name = param.__name__
if (
_FORWARD_REF_HAS_CLASS and not forward_ref.__forward_is_class__
) or param_name not in globals:
globals[param_name] = param
locals.pop(param_name, None)
arg = forward_ref.__forward_arg__
if arg.isidentifier() and not keyword.iskeyword(arg):
if arg in locals:
value = locals[arg]
elif arg in globals:
value = globals[arg]
elif hasattr(builtins, arg):
return getattr(builtins, arg)
else:
raise NameError(arg)
else:
code = forward_ref.__forward_code__
value = eval(code, globals, locals)
forward_ref.__forward_evaluated__ = True
forward_ref.__forward_value__ = value
return value
def _lax_type_check(
value, msg, is_argument=True, *, module=None, allow_special_forms=False
):
"""
A lax Python 3.11+ like version of typing._type_check
"""
if hasattr(typing, "_type_convert"):
if (
sys.version_info >= (3, 10, 3)
or (3, 9, 10) < sys.version_info[:3] < (3, 10)
):
# allow_special_forms introduced later cpython/#30926 (bpo-46539)
type_ = typing._type_convert(
value,
module=module,
allow_special_forms=allow_special_forms,
)
# module was added with bpo-41249 before is_class (bpo-46539)
elif "__forward_module__" in typing.ForwardRef.__slots__:
type_ = typing._type_convert(value, module=module)
else:
type_ = typing._type_convert(value)
else:
if value is None:
return type(None)
if isinstance(value, str):
return ForwardRef(value)
type_ = value
invalid_generic_forms = (Generic, Protocol)
if not allow_special_forms:
invalid_generic_forms += (ClassVar,)
if is_argument:
invalid_generic_forms += (Final,)
if (
isinstance(type_, typing._GenericAlias)
and get_origin(type_) in invalid_generic_forms
):
raise TypeError(f"{type_} is not valid as type argument") from None
if type_ in (Any, LiteralString, NoReturn, Never, Self, TypeAlias):
return type_
if allow_special_forms and type_ in (ClassVar, Final):
return type_
if (
isinstance(type_, (_SpecialForm, typing._SpecialForm))
or type_ in (Generic, Protocol)
):
raise TypeError(f"Plain {type_} is not valid as type argument") from None
if type(type_) is tuple: # lax version with tuple instead of callable
raise TypeError(f"{msg} Got {type_!r:.100}.")
return type_
def evaluate_forward_ref(
forward_ref,
*,
owner=None,
globals=None,
locals=None,
type_params=None,
format=Format.VALUE,
_recursive_guard=frozenset(),
):
"""Evaluate a forward reference as a type hint.
This is similar to calling the ForwardRef.evaluate() method,
but unlike that method, evaluate_forward_ref() also:
* Recursively evaluates forward references nested within the type hint.
* Rejects certain objects that are not valid type hints.
* Replaces type hints that evaluate to None with types.NoneType.
* Supports the *FORWARDREF* and *STRING* formats.
*forward_ref* must be an instance of ForwardRef. *owner*, if given,
should be the object that holds the annotations that the forward reference
derived from, such as a module, class object, or function. It is used to
infer the namespaces to use for looking up names. *globals* and *locals*
can also be explicitly given to provide the global and local namespaces.
*type_params* is a tuple of type parameters that are in scope when
evaluating the forward reference. This parameter must be provided (though
it may be an empty tuple) if *owner* is not given and the forward reference
does not already have an owner set. *format* specifies the format of the
annotation and is a member of the annotationlib.Format enum.
"""
if format == Format.STRING:
return forward_ref.__forward_arg__
if forward_ref.__forward_arg__ in _recursive_guard:
return forward_ref
# Evaluate the forward reference
try:
value = _eval_with_owner(
forward_ref,
owner=owner,
globals=globals,
locals=locals,
type_params=type_params,
)
except NameError:
if format == Format.FORWARDREF:
return forward_ref
else:
raise
msg = "Forward references must evaluate to types."
if not _FORWARD_REF_HAS_CLASS:
allow_special_forms = not forward_ref.__forward_is_argument__
else:
allow_special_forms = forward_ref.__forward_is_class__
type_ = _lax_type_check(
value,
msg,
is_argument=forward_ref.__forward_is_argument__,
allow_special_forms=allow_special_forms,
)
# Recursively evaluate the type
if isinstance(type_, ForwardRef):
if getattr(type_, "__forward_module__", True) is not None:
globals = None
return evaluate_forward_ref(
type_,
globals=globals,
locals=locals,
type_params=type_params, owner=owner,
_recursive_guard=_recursive_guard, format=format
)
if sys.version_info < (3, 12, 5) and type_params:
# Make use of type_params
locals = dict(locals) if locals else {}
for tvar in type_params:
if tvar.__name__ not in locals: # let's not overwrite something present
locals[tvar.__name__] = tvar
if sys.version_info < (3, 9):
return typing._eval_type(
type_,
globals,
locals,
)
if sys.version_info < (3, 12, 5):
return typing._eval_type(
type_,
globals,
locals,
recursive_guard=_recursive_guard | {forward_ref.__forward_arg__},
)
if sys.version_info < (3, 14):
return typing._eval_type(
type_,
globals,
locals,
type_params,
recursive_guard=_recursive_guard | {forward_ref.__forward_arg__},
)
return typing._eval_type(
type_,
globals,
locals,
type_params,
recursive_guard=_recursive_guard | {forward_ref.__forward_arg__},
format=format,
owner=owner,
)
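# A rough usage sketch (assumes Python 3.9+ for the subscripted builtin in the
# referenced string):
#
#     from typing import ForwardRef
#     from typing_extensions import Format, evaluate_forward_ref
#
#     evaluate_forward_ref(ForwardRef("list[int]"), globals={})   # list[int]
#     ref = ForwardRef("NotDefinedYet")
#     evaluate_forward_ref(ref, format=Format.FORWARDREF)  # returns ref unresolved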
# Aliases for items that have always been in typing.
# Explicitly assign these (rather than using `from typing import *` at the top),
# so that we get a CI error if one of these is deleted from typing.py
# in a future version of Python
AbstractSet = typing.AbstractSet
AnyStr = typing.AnyStr
BinaryIO = typing.BinaryIO
Callable = typing.Callable
Collection = typing.Collection
Container = typing.Container
Dict = typing.Dict
ForwardRef = typing.ForwardRef
FrozenSet = typing.FrozenSet
Generic = typing.Generic
Hashable = typing.Hashable
IO = typing.IO
ItemsView = typing.ItemsView
Iterable = typing.Iterable
Iterator = typing.Iterator
KeysView = typing.KeysView
List = typing.List
Mapping = typing.Mapping
MappingView = typing.MappingView
Match = typing.Match
MutableMapping = typing.MutableMapping
MutableSequence = typing.MutableSequence
MutableSet = typing.MutableSet
Optional = typing.Optional
Pattern = typing.Pattern
Reversible = typing.Reversible
Sequence = typing.Sequence
Set = typing.Set
Sized = typing.Sized
TextIO = typing.TextIO
Tuple = typing.Tuple
Union = typing.Union
ValuesView = typing.ValuesView
cast = typing.cast
no_type_check = typing.no_type_check
no_type_check_decorator = typing.no_type_check_decorator
|
# mypy: allow-untyped-defs, allow-untyped-calls
from __future__ import annotations
import os
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from . import autogenerate as autogen
from . import util
from .runtime.environment import EnvironmentContext
from .script import ScriptDirectory
if TYPE_CHECKING:
from alembic.config import Config
from alembic.script.base import Script
from alembic.script.revision import _RevIdType
from .runtime.environment import ProcessRevisionDirectiveFn
def list_templates(config: Config) -> None:
"""List available templates.
:param config: a :class:`.Config` object.
"""
config.print_stdout("Available templates:\n")
for tempname in os.listdir(config.get_template_directory()):
with open(
os.path.join(config.get_template_directory(), tempname, "README")
) as readme:
synopsis = next(readme).rstrip()
config.print_stdout("%s - %s", tempname, synopsis)
config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
config.print_stdout("\n alembic init --template generic ./scripts")
def init(
config: Config,
directory: str,
template: str = "generic",
package: bool = False,
) -> None:
"""Initialize a new scripts directory.
:param config: a :class:`.Config` object.
:param directory: string path of the target directory.
:param template: string name of the migration environment template to
use.
:param package: when True, write ``__init__.py`` files into the
environment location as well as the versions/ location.
"""
if os.access(directory, os.F_OK) and os.listdir(directory):
raise util.CommandError(
"Directory %s already exists and is not empty" % directory
)
template_dir = os.path.join(config.get_template_directory(), template)
if not os.access(template_dir, os.F_OK):
raise util.CommandError("No such template %r" % template)
if not os.access(directory, os.F_OK):
with util.status(
f"Creating directory {os.path.abspath(directory)!r}",
**config.messaging_opts,
):
os.makedirs(directory)
versions = os.path.join(directory, "versions")
with util.status(
f"Creating directory {os.path.abspath(versions)!r}",
**config.messaging_opts,
):
os.makedirs(versions)
script = ScriptDirectory(directory)
config_file: str | None = None
for file_ in os.listdir(template_dir):
file_path = os.path.join(template_dir, file_)
if file_ == "alembic.ini.mako":
assert config.config_file_name is not None
config_file = os.path.abspath(config.config_file_name)
if os.access(config_file, os.F_OK):
util.msg(
f"File {config_file!r} already exists, skipping",
**config.messaging_opts,
)
else:
script._generate_template(
file_path, config_file, script_location=directory
)
elif os.path.isfile(file_path):
output_file = os.path.join(directory, file_)
script._copy_file(file_path, output_file)
if package:
for path in [
os.path.join(os.path.abspath(directory), "__init__.py"),
os.path.join(os.path.abspath(versions), "__init__.py"),
]:
with util.status(f"Adding {path!r}", **config.messaging_opts):
with open(path, "w"):
pass
assert config_file is not None
util.msg(
"Please edit configuration/connection/logging "
f"settings in {config_file!r} before proceeding.",
**config.messaging_opts,
)
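# A minimal programmatic-usage sketch (paths are placeholders): the equivalent
# of running "alembic init alembic" from the command line.
#
#     from alembic.config import Config
#     from alembic import command
#
#     cfg = Config("alembic.ini")
#     command.init(cfg, "alembic", template="generic")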
def revision(
config: Config,
message: Optional[str] = None,
autogenerate: bool = False,
sql: bool = False,
head: str = "head",
splice: bool = False,
branch_label: Optional[_RevIdType] = None,
version_path: Optional[str] = None,
rev_id: Optional[str] = None,
depends_on: Optional[str] = None,
process_revision_directives: Optional[ProcessRevisionDirectiveFn] = None,
) -> Union[Optional[Script], List[Optional[Script]]]:
"""Create a new revision file.
:param config: a :class:`.Config` object.
:param message: string message to apply to the revision; this is the
``-m`` option to ``alembic revision``.
:param autogenerate: whether or not to autogenerate the script from
the database; this is the ``--autogenerate`` option to
``alembic revision``.
:param sql: whether to dump the script out as a SQL string; when specified,
the script is dumped to stdout. This is the ``--sql`` option to
``alembic revision``.
:param head: head revision to build the new revision upon as a parent;
this is the ``--head`` option to ``alembic revision``.
:param splice: whether or not the new revision should be made into a
new head of its own; is required when the given ``head`` is not itself
a head. This is the ``--splice`` option to ``alembic revision``.
:param branch_label: string label to apply to the branch; this is the
``--branch-label`` option to ``alembic revision``.
:param version_path: string symbol identifying a specific version path
from the configuration; this is the ``--version-path`` option to
``alembic revision``.
:param rev_id: optional revision identifier to use instead of having
one generated; this is the ``--rev-id`` option to ``alembic revision``.
:param depends_on: optional list of "depends on" identifiers; this is the
``--depends-on`` option to ``alembic revision``.
:param process_revision_directives: this is a callable that takes the
same form as the callable described at
:paramref:`.EnvironmentContext.configure.process_revision_directives`;
will be applied to the structure generated by the revision process
where it can be altered programmatically. Note that unlike all
the other parameters, this option is only available via programmatic
use of :func:`.command.revision`.
"""
script_directory = ScriptDirectory.from_config(config)
command_args = dict(
message=message,
autogenerate=autogenerate,
sql=sql,
head=head,
splice=splice,
branch_label=branch_label,
version_path=version_path,
rev_id=rev_id,
depends_on=depends_on,
)
revision_context = autogen.RevisionContext(
config,
script_directory,
command_args,
process_revision_directives=process_revision_directives,
)
environment = util.asbool(config.get_main_option("revision_environment"))
if autogenerate:
environment = True
if sql:
raise util.CommandError(
"Using --sql with --autogenerate does not make any sense"
)
def retrieve_migrations(rev, context):
revision_context.run_autogenerate(rev, context)
return []
elif environment:
def retrieve_migrations(rev, context):
revision_context.run_no_autogenerate(rev, context)
return []
elif sql:
raise util.CommandError(
"Using --sql with the revision command when "
"revision_environment is not configured does not make any sense"
)
if environment:
with EnvironmentContext(
config,
script_directory,
fn=retrieve_migrations,
as_sql=sql,
template_args=revision_context.template_args,
revision_context=revision_context,
):
script_directory.run_env()
# the revision_context now has MigrationScript structure(s) present.
# these could theoretically be further processed / rewritten *here*,
# in addition to the hooks present within each run_migrations() call,
# or at the end of env.py run_migrations_online().
scripts = [script for script in revision_context.generate_scripts()]
if len(scripts) == 1:
return scripts[0]
else:
return scripts
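# A minimal programmatic-usage sketch; autogenerate=True runs env.py, so a
# working database connection must be configured.
#
#     cfg = Config("alembic.ini")   # as in the init() sketch above
#     command.revision(cfg, message="add account table")
#     command.revision(cfg, message="autogen pass", autogenerate=True)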
def check(config: "Config") -> None:
"""Check if revision command with autogenerate has pending upgrade ops.
:param config: a :class:`.Config` object.
.. versionadded:: 1.9.0
"""
script_directory = ScriptDirectory.from_config(config)
command_args = dict(
message=None,
autogenerate=True,
sql=False,
head="head",
splice=False,
branch_label=None,
version_path=None,
rev_id=None,
depends_on=None,
)
revision_context = autogen.RevisionContext(
config,
script_directory,
command_args,
)
def retrieve_migrations(rev, context):
revision_context.run_autogenerate(rev, context)
return []
with EnvironmentContext(
config,
script_directory,
fn=retrieve_migrations,
as_sql=False,
template_args=revision_context.template_args,
revision_context=revision_context,
):
script_directory.run_env()
# the revision_context now has MigrationScript structure(s) present.
migration_script = revision_context.generated_revisions[-1]
diffs = []
for upgrade_ops in migration_script.upgrade_ops_list:
diffs.extend(upgrade_ops.as_diffs())
if diffs:
raise util.AutogenerateDiffsDetected(
f"New upgrade operations detected: {diffs}",
revision_context=revision_context,
diffs=diffs,
)
else:
config.print_stdout("No new upgrade operations detected.")
def merge(
config: Config,
revisions: _RevIdType,
message: Optional[str] = None,
branch_label: Optional[_RevIdType] = None,
rev_id: Optional[str] = None,
) -> Optional[Script]:
"""Merge two revisions together. Creates a new migration file.
:param config: a :class:`.Config` instance
:param revisions: The revisions to merge.
:param message: string message to apply to the revision.
:param branch_label: string label name to apply to the new revision.
:param rev_id: hardcoded revision identifier instead of generating a new
one.
.. seealso::
:ref:`branches`
"""
script = ScriptDirectory.from_config(config)
template_args = {
"config": config # Let templates use config for
# e.g. multiple databases
}
environment = util.asbool(config.get_main_option("revision_environment"))
if environment:
def nothing(rev, context):
return []
with EnvironmentContext(
config,
script,
fn=nothing,
as_sql=False,
template_args=template_args,
):
script.run_env()
return script.generate_revision(
rev_id or util.rev_id(),
message,
refresh=True,
head=revisions,
branch_labels=branch_label,
**template_args, # type:ignore[arg-type]
)
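# A brief sketch (revision identifiers are placeholders):
#
#     command.merge(cfg, ["1975ea83b712", "27c6a30d7c24"], message="merge branches")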
def upgrade(
config: Config,
revision: str,
sql: bool = False,
tag: Optional[str] = None,
) -> None:
"""Upgrade to a later version.
:param config: a :class:`.Config` instance.
:param revision: string revision target or range for --sql mode. May be
``"heads"`` to target the most recent revision(s).
:param sql: if True, use ``--sql`` mode.
:param tag: an arbitrary "tag" that can be intercepted by custom
``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
method.
"""
script = ScriptDirectory.from_config(config)
starting_rev = None
if ":" in revision:
if not sql:
raise util.CommandError("Range revision not allowed")
starting_rev, revision = revision.split(":", 2)
def upgrade(rev, context):
return script._upgrade_revs(revision, rev)
with EnvironmentContext(
config,
script,
fn=upgrade,
as_sql=sql,
starting_rev=starting_rev,
destination_rev=revision,
tag=tag,
):
script.run_env()
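# A brief sketch of online vs. offline invocation (the revision id is a
# placeholder):
#
#     command.upgrade(cfg, "head")                         # run against the DB
#     command.upgrade(cfg, "1975ea83b712:head", sql=True)  # offline: SQL to stdout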
def downgrade(
config: Config,
revision: str,
sql: bool = False,
tag: Optional[str] = None,
) -> None:
"""Revert to a previous version.
:param config: a :class:`.Config` instance.
:param revision: string revision target or range for --sql mode. May
be ``"base"`` to target the first revision.
:param sql: if True, use ``--sql`` mode.
:param tag: an arbitrary "tag" that can be intercepted by custom
``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
method.
"""
script = ScriptDirectory.from_config(config)
starting_rev = None
if ":" in revision:
if not sql:
raise util.CommandError("Range revision not allowed")
starting_rev, revision = revision.split(":", 2)
elif sql:
raise util.CommandError(
"downgrade with --sql requires <fromrev>:<torev>"
)
def downgrade(rev, context):
return script._downgrade_revs(revision, rev)
with EnvironmentContext(
config,
script,
fn=downgrade,
as_sql=sql,
starting_rev=starting_rev,
destination_rev=revision,
tag=tag,
):
script.run_env()
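# A brief sketch; note that offline mode requires an explicit <fromrev>:<torev>
# range.
#
#     command.downgrade(cfg, "-1")                   # step back one revision
#     command.downgrade(cfg, "head:base", sql=True)  # emit downgrade SQL to stdout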
def show(config: Config, rev: str) -> None:
"""Show the revision(s) denoted by the given symbol.
:param config: a :class:`.Config` instance.
:param rev: string revision target. May be ``"current"`` to show the
revision(s) currently applied in the database.
"""
script = ScriptDirectory.from_config(config)
if rev == "current":
def show_current(rev, context):
for sc in script.get_revisions(rev):
config.print_stdout(sc.log_entry)
return []
with EnvironmentContext(config, script, fn=show_current):
script.run_env()
else:
for sc in script.get_revisions(rev):
config.print_stdout(sc.log_entry)
def history(
config: Config,
rev_range: Optional[str] = None,
verbose: bool = False,
indicate_current: bool = False,
) -> None:
"""List changeset scripts in chronological order.
:param config: a :class:`.Config` instance.
:param rev_range: string revision range.
:param verbose: output in verbose mode.
:param indicate_current: indicate current revision.
"""
base: Optional[str]
head: Optional[str]
script = ScriptDirectory.from_config(config)
if rev_range is not None:
if ":" not in rev_range:
raise util.CommandError(
"History range requires [start]:[end], " "[start]:, or :[end]"
)
base, head = rev_range.strip().split(":")
else:
base = head = None
environment = (
util.asbool(config.get_main_option("revision_environment"))
or indicate_current
)
def _display_history(config, script, base, head, currents=()):
for sc in script.walk_revisions(
base=base or "base", head=head or "heads"
):
if indicate_current:
sc._db_current_indicator = sc.revision in currents
config.print_stdout(
sc.cmd_format(
verbose=verbose,
include_branches=True,
include_doc=True,
include_parents=True,
)
)
def _display_history_w_current(config, script, base, head):
def _display_current_history(rev, context):
if head == "current":
_display_history(config, script, base, rev, rev)
elif base == "current":
_display_history(config, script, rev, head, rev)
else:
_display_history(config, script, base, head, rev)
return []
with EnvironmentContext(config, script, fn=_display_current_history):
script.run_env()
if base == "current" or head == "current" or environment:
_display_history_w_current(config, script, base, head)
else:
_display_history(config, script, base, head)
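# A brief sketch; indicate_current=True (like a "current" endpoint in the
# range) runs env.py in order to query the database.
#
#     command.history(cfg)
#     command.history(cfg, rev_range="base:head", verbose=True)
#     command.history(cfg, indicate_current=True)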
def heads(
config: Config, verbose: bool = False, resolve_dependencies: bool = False
) -> None:
"""Show current available heads in the script directory.
:param config: a :class:`.Config` instance.
:param verbose: output in verbose mode.
:param resolve_dependencies: treat dependency version as down revisions.
"""
script = ScriptDirectory.from_config(config)
if resolve_dependencies:
heads = script.get_revisions("heads")
else:
heads = script.get_revisions(script.get_heads())
for rev in heads:
config.print_stdout(
rev.cmd_format(
verbose, include_branches=True, tree_indicators=False
)
)
def branches(config: Config, verbose: bool = False) -> None:
"""Show current branch points.
:param config: a :class:`.Config` instance.
:param verbose: output in verbose mode.
"""
script = ScriptDirectory.from_config(config)
for sc in script.walk_revisions():
if sc.is_branch_point:
config.print_stdout(
"%s\n%s\n",
sc.cmd_format(verbose, include_branches=True),
"\n".join(
"%s -> %s"
% (
" " * len(str(sc.revision)),
rev_obj.cmd_format(
False, include_branches=True, include_doc=verbose
),
)
for rev_obj in (
script.get_revision(rev) for rev in sc.nextrev
)
),
)
def current(config: Config, verbose: bool = False) -> None:
"""Display the current revision for a database.
:param config: a :class:`.Config` instance.
:param verbose: output in verbose mode.
"""
script = ScriptDirectory.from_config(config)
def display_version(rev, context):
if verbose:
config.print_stdout(
"Current revision(s) for %s:",
util.obfuscate_url_pw(context.connection.engine.url),
)
for rev in script.get_all_current(rev):
config.print_stdout(rev.cmd_format(verbose))
return []
with EnvironmentContext(
config, script, fn=display_version, dont_mutate=True
):
script.run_env()
def stamp(
config: Config,
revision: _RevIdType,
sql: bool = False,
tag: Optional[str] = None,
purge: bool = False,
) -> None:
"""'stamp' the revision table with the given revision; don't
run any migrations.
:param config: a :class:`.Config` instance.
:param revision: target revision or list of revisions. May be a list
to indicate stamping of multiple branch heads; may be ``"base"``
to remove all revisions from the table or ``"heads"`` to stamp the
most recent revision(s).
.. note:: this parameter is called "revisions" in the command line
interface.
:param sql: use ``--sql`` mode
:param tag: an arbitrary "tag" that can be intercepted by custom
``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument`
method.
:param purge: delete all entries in the version table before stamping.
"""
script = ScriptDirectory.from_config(config)
if sql:
destination_revs = []
starting_rev = None
for _revision in util.to_list(revision):
if ":" in _revision:
srev, _revision = _revision.split(":", 2)
if starting_rev != srev:
if starting_rev is None:
starting_rev = srev
else:
raise util.CommandError(
"Stamp operation with --sql only supports a "
"single starting revision at a time"
)
destination_revs.append(_revision)
else:
destination_revs = util.to_list(revision)
def do_stamp(rev, context):
return script._stamp_revs(util.to_tuple(destination_revs), rev)
with EnvironmentContext(
config,
script,
fn=do_stamp,
as_sql=sql,
starting_rev=starting_rev if sql else None,
destination_rev=util.to_tuple(destination_revs),
tag=tag,
purge=purge,
):
script.run_env()
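# A brief sketch: record a revision in the version table without running
# migrations.
#
#     command.stamp(cfg, "head")
#     command.stamp(cfg, "base", purge=True)  # clear the version table first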
def edit(config: Config, rev: str) -> None:
"""Edit revision script(s) using $EDITOR.
:param config: a :class:`.Config` instance.
:param rev: target revision.
"""
script = ScriptDirectory.from_config(config)
if rev == "current":
def edit_current(rev, context):
if not rev:
raise util.CommandError("No current revisions")
for sc in script.get_revisions(rev):
util.open_in_editor(sc.path)
return []
with EnvironmentContext(config, script, fn=edit_current):
script.run_env()
else:
revs = script.get_revisions(rev)
if not revs:
raise util.CommandError(
"No revision files indicated by symbol '%s'" % rev
)
for sc in revs:
assert sc
util.open_in_editor(sc.path)
def ensure_version(config: Config, sql: bool = False) -> None:
"""Create the alembic version table if it doesn't exist already .
:param config: a :class:`.Config` instance.
:param sql: use ``--sql`` mode.
.. versionadded:: 1.7.6
"""
script = ScriptDirectory.from_config(config)
def do_ensure_version(rev, context):
context._ensure_version_table()
return []
with EnvironmentContext(
config,
script,
fn=do_ensure_version,
as_sql=sql,
):
script.run_env()
|
from __future__ import annotations
from argparse import ArgumentParser
from argparse import Namespace
from configparser import ConfigParser
import inspect
import os
import sys
from typing import Any
from typing import cast
from typing import Dict
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Sequence
from typing import TextIO
from typing import Union
from typing_extensions import TypedDict
from . import __version__
from . import command
from . import util
from .util import compat
class Config:
r"""Represent an Alembic configuration.
Within an ``env.py`` script, this is available
via the :attr:`.EnvironmentContext.config` attribute,
which in turn is available at ``alembic.context``::
from alembic import context
some_param = context.config.get_main_option("my option")
When invoking Alembic programmatically, a new
:class:`.Config` can be created by passing
the name of an .ini file to the constructor::
from alembic.config import Config
alembic_cfg = Config("/path/to/yourapp/alembic.ini")
With a :class:`.Config` object, you can then
run Alembic commands programmatically using the directives
in :mod:`alembic.command`.
The :class:`.Config` object can also be constructed without
a filename. Values can be set programmatically, and
new sections will be created as needed::
from alembic.config import Config
alembic_cfg = Config()
alembic_cfg.set_main_option("script_location", "myapp:migrations")
alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar")
alembic_cfg.set_section_option("mysection", "foo", "bar")
.. warning::
When using programmatic configuration, make sure the
``env.py`` file in use is compatible with the target configuration,
including that the call to Python ``logging.fileConfig()`` is
omitted if the programmatic configuration doesn't actually include
logging directives.
For passing non-string values to environments, such as connections and
engines, use the :attr:`.Config.attributes` dictionary::
with engine.begin() as connection:
alembic_cfg.attributes['connection'] = connection
command.upgrade(alembic_cfg, "head")
:param file\_: name of the .ini file to open.
:param ini_section: name of the main Alembic section within the
.ini file
:param output_buffer: optional file-like input buffer which
will be passed to the :class:`.MigrationContext` - used to redirect
the output of "offline generation" when using Alembic programmatically.
:param stdout: buffer where the "print" output of commands will be sent.
Defaults to ``sys.stdout``.
:param config_args: A dictionary of keys and values that will be used
for substitution in the alembic config file. The dictionary as given
is **copied** to a new one, stored locally as the attribute
``.config_args``. When the :attr:`.Config.file_config` attribute is
first invoked, the replacement variable ``here`` will be added to this
dictionary before the dictionary is passed to ``ConfigParser()``
to parse the .ini file.
:param attributes: optional dictionary of arbitrary Python keys/values,
which will be populated into the :attr:`.Config.attributes` dictionary.
.. seealso::
:ref:`connection_sharing`
"""
def __init__(
self,
file_: Union[str, os.PathLike[str], None] = None,
ini_section: str = "alembic",
output_buffer: Optional[TextIO] = None,
stdout: TextIO = sys.stdout,
cmd_opts: Optional[Namespace] = None,
config_args: Mapping[str, Any] = util.immutabledict(),
attributes: Optional[Dict[str, Any]] = None,
) -> None:
"""Construct a new :class:`.Config`"""
self.config_file_name = file_
self.config_ini_section = ini_section
self.output_buffer = output_buffer
self.stdout = stdout
self.cmd_opts = cmd_opts
self.config_args = dict(config_args)
if attributes:
self.attributes.update(attributes)
cmd_opts: Optional[Namespace] = None
"""The command-line options passed to the ``alembic`` script.
Within an ``env.py`` script this can be accessed via the
:attr:`.EnvironmentContext.config` attribute.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument`
"""
config_file_name: Union[str, os.PathLike[str], None] = None
"""Filesystem path to the .ini file in use."""
config_ini_section: str = None # type:ignore[assignment]
"""Name of the config file section to read basic configuration
from. Defaults to ``alembic``, that is the ``[alembic]`` section
of the .ini file. This value is modified using the ``-n/--name``
option to the Alembic runner.
"""
@util.memoized_property
def attributes(self) -> Dict[str, Any]:
"""A Python dictionary for storage of additional state.
This is a utility dictionary which can include not just strings but
engines, connections, schema objects, or anything else.
Use this to pass objects into an env.py script, such as passing
a :class:`sqlalchemy.engine.base.Connection` when calling
commands from :mod:`alembic.command` programmatically.
.. seealso::
:ref:`connection_sharing`
:paramref:`.Config.attributes`
"""
return {}
def print_stdout(self, text: str, *arg: Any) -> None:
"""Render a message to standard out.
When :meth:`.Config.print_stdout` is called with additional args
those arguments will be formatted against the provided text,
otherwise we simply output the provided text verbatim.
This is a no-op when the ``quiet`` messaging option is enabled.
e.g.::
>>> config.print_stdout('Some text %s', 'arg')
Some text arg
"""
if arg:
output = str(text) % arg
else:
output = str(text)
util.write_outstream(self.stdout, output, "\n", **self.messaging_opts)
@util.memoized_property
def file_config(self) -> ConfigParser:
"""Return the underlying ``ConfigParser`` object.
Direct access to the .ini file is available here,
though the :meth:`.Config.get_section` and
:meth:`.Config.get_main_option`
methods provide a possibly simpler interface.
"""
if self.config_file_name:
here = os.path.abspath(os.path.dirname(self.config_file_name))
else:
here = ""
self.config_args["here"] = here
file_config = ConfigParser(self.config_args)
if self.config_file_name:
compat.read_config_parser(file_config, [self.config_file_name])
else:
file_config.add_section(self.config_ini_section)
return file_config
def get_template_directory(self) -> str:
"""Return the directory where Alembic setup templates are found.
This method is used by the alembic ``init`` and ``list_templates``
commands.
"""
import alembic
package_dir = os.path.abspath(os.path.dirname(alembic.__file__))
return os.path.join(package_dir, "templates")
@overload
def get_section(
self, name: str, default: None = ...
) -> Optional[Dict[str, str]]: ...
# "default" here could also be a TypeVar
# _MT = TypeVar("_MT", bound=Mapping[str, str]),
# however mypy wasn't handling that correctly (pyright was)
@overload
def get_section(
self, name: str, default: Dict[str, str]
) -> Dict[str, str]: ...
@overload
def get_section(
self, name: str, default: Mapping[str, str]
) -> Union[Dict[str, str], Mapping[str, str]]: ...
def get_section(
self, name: str, default: Optional[Mapping[str, str]] = None
) -> Optional[Mapping[str, str]]:
"""Return all the configuration options from a given .ini file section
as a dictionary.
If the given section does not exist, the value of ``default``
is returned, which is expected to be a dictionary or other mapping.
"""
if not self.file_config.has_section(name):
return default
return dict(self.file_config.items(name))
def set_main_option(self, name: str, value: str) -> None:
"""Set an option programmatically within the 'main' section.
This overrides whatever was in the .ini file.
:param name: name of the value
:param value: the value. Note that this value is passed to
``ConfigParser.set``, which supports variable interpolation using
pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
an interpolation symbol must therefore be escaped, e.g. ``%%``.
The given value may refer to another value already in the file
using the interpolation format.
"""
self.set_section_option(self.config_ini_section, name, value)
def remove_main_option(self, name: str) -> None:
self.file_config.remove_option(self.config_ini_section, name)
def set_section_option(self, section: str, name: str, value: str) -> None:
"""Set an option programmatically within the given section.
The section is created if it doesn't exist already.
The value here will override whatever was in the .ini
file.
:param section: name of the section
:param name: name of the value
:param value: the value. Note that this value is passed to
``ConfigParser.set``, which supports variable interpolation using
pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of
an interpolation symbol must therefore be escaped, e.g. ``%%``.
The given value may refer to another value already in the file
using the interpolation format.
"""
if not self.file_config.has_section(section):
self.file_config.add_section(section)
self.file_config.set(section, name, value)
def get_section_option(
self, section: str, name: str, default: Optional[str] = None
) -> Optional[str]:
"""Return an option from the given section of the .ini file."""
if not self.file_config.has_section(section):
raise util.CommandError(
"No config file %r found, or file has no "
"'[%s]' section" % (self.config_file_name, section)
)
if self.file_config.has_option(section, name):
return self.file_config.get(section, name)
else:
return default
@overload
def get_main_option(self, name: str, default: str) -> str: ...
@overload
def get_main_option(
self, name: str, default: Optional[str] = None
) -> Optional[str]: ...
def get_main_option(
self, name: str, default: Optional[str] = None
) -> Optional[str]:
"""Return an option from the 'main' section of the .ini file.
This defaults to being a key from the ``[alembic]``
section, unless the ``-n/--name`` flag were used to
indicate a different section.
"""
return self.get_section_option(self.config_ini_section, name, default)
@util.memoized_property
def messaging_opts(self) -> MessagingOptions:
"""The messaging options."""
return cast(
MessagingOptions,
util.immutabledict(
{"quiet": getattr(self.cmd_opts, "quiet", False)}
),
)
class MessagingOptions(TypedDict, total=False):
quiet: bool
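# Illustrative sketch (not part of the Alembic source): driving Config
# programmatically. Assumes an "alembic.ini" with an [alembic] section
# exists in the current directory. Note the doubled "%%" needed to
# escape a literal percent sign for ConfigParser interpolation.
def _demo_config_usage() -> None:
    cfg = Config("alembic.ini")
    cfg.set_main_option("script_location", "migrations")
    # a raw percent sign must be escaped as %%
    cfg.set_main_option("file_template", "%%(rev)s_%%(slug)s")
    print(cfg.get_main_option("script_location"))
    # whole sections come back as plain dictionaries
    print(cfg.get_section("alembic", {}))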
class CommandLine:
def __init__(self, prog: Optional[str] = None) -> None:
self._generate_args(prog)
def _generate_args(self, prog: Optional[str]) -> None:
def add_options(
fn: Any, parser: Any, positional: Any, kwargs: Any
) -> None:
kwargs_opts = {
"template": (
"-t",
"--template",
dict(
default="generic",
type=str,
help="Setup template for use with 'init'",
),
),
"message": (
"-m",
"--message",
dict(
type=str, help="Message string to use with 'revision'"
),
),
"sql": (
"--sql",
dict(
action="store_true",
help="Don't emit SQL to database - dump to "
"standard output/file instead. See docs on "
"offline mode.",
),
),
"tag": (
"--tag",
dict(
type=str,
help="Arbitrary 'tag' name - can be used by "
"custom env.py scripts.",
),
),
"head": (
"--head",
dict(
type=str,
help="Specify head revision or <branchname>@head "
"to base new revision on.",
),
),
"splice": (
"--splice",
dict(
action="store_true",
help="Allow a non-head revision as the "
"'head' to splice onto",
),
),
"depends_on": (
"--depends-on",
dict(
action="append",
help="Specify one or more revision identifiers "
"which this revision should depend on.",
),
),
"rev_id": (
"--rev-id",
dict(
type=str,
help="Specify a hardcoded revision id instead of "
"generating one",
),
),
"version_path": (
"--version-path",
dict(
type=str,
help="Specify specific path from config for "
"version file",
),
),
"branch_label": (
"--branch-label",
dict(
type=str,
help="Specify a branch label to apply to the "
"new revision",
),
),
"verbose": (
"-v",
"--verbose",
dict(action="store_true", help="Use more verbose output"),
),
"resolve_dependencies": (
"--resolve-dependencies",
dict(
action="store_true",
help="Treat dependency versions as down revisions",
),
),
"autogenerate": (
"--autogenerate",
dict(
action="store_true",
help="Populate revision script with candidate "
"migration operations, based on comparison "
"of database to model.",
),
),
"rev_range": (
"-r",
"--rev-range",
dict(
action="store",
help="Specify a revision range; "
"format is [start]:[end]",
),
),
"indicate_current": (
"-i",
"--indicate-current",
dict(
action="store_true",
help="Indicate the current revision",
),
),
"purge": (
"--purge",
dict(
action="store_true",
help="Unconditionally erase the version table "
"before stamping",
),
),
"package": (
"--package",
dict(
action="store_true",
help="Write empty __init__.py files to the "
"environment and version locations",
),
),
}
positional_help = {
"directory": "location of scripts directory",
"revision": "revision identifier",
"revisions": "one or more revisions, or 'heads' for all heads",
}
for arg in kwargs:
if arg in kwargs_opts:
args = kwargs_opts[arg]
args, kw = args[0:-1], args[-1]
parser.add_argument(*args, **kw)
for arg in positional:
if (
arg == "revisions"
or fn in positional_translations
and positional_translations[fn][arg] == "revisions"
):
subparser.add_argument(
"revisions",
nargs="+",
help=positional_help.get("revisions"),
)
else:
subparser.add_argument(arg, help=positional_help.get(arg))
parser = ArgumentParser(prog=prog)
parser.add_argument(
"--version", action="version", version="%%(prog)s %s" % __version__
)
parser.add_argument(
"-c",
"--config",
type=str,
default=os.environ.get("ALEMBIC_CONFIG", "alembic.ini"),
help="Alternate config file; defaults to value of "
'ALEMBIC_CONFIG environment variable, or "alembic.ini"',
)
parser.add_argument(
"-n",
"--name",
type=str,
default="alembic",
help="Name of section in .ini file to " "use for Alembic config",
)
parser.add_argument(
"-x",
action="append",
help="Additional arguments consumed by "
"custom env.py scripts, e.g. -x "
"setting1=somesetting -x setting2=somesetting",
)
parser.add_argument(
"--raiseerr",
action="store_true",
help="Raise a full stack trace on error",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Do not log to std output.",
)
subparsers = parser.add_subparsers()
positional_translations: Dict[Any, Any] = {
command.stamp: {"revision": "revisions"}
}
for fn in [getattr(command, n) for n in dir(command)]:
if (
inspect.isfunction(fn)
and fn.__name__[0] != "_"
and fn.__module__ == "alembic.command"
):
spec = compat.inspect_getfullargspec(fn)
if spec[3] is not None:
positional = spec[0][1 : -len(spec[3])]
kwarg = spec[0][-len(spec[3]) :]
else:
positional = spec[0][1:]
kwarg = []
if fn in positional_translations:
positional = [
positional_translations[fn].get(name, name)
for name in positional
]
                # use the first paragraph of the docstring (up to the
                # first blank line) as the subcommand's help text
help_ = fn.__doc__
if help_:
help_text = []
for line in help_.split("\n"):
if not line.strip():
break
else:
help_text.append(line.strip())
else:
help_text = []
subparser = subparsers.add_parser(
fn.__name__, help=" ".join(help_text)
)
add_options(fn, subparser, positional, kwarg)
subparser.set_defaults(cmd=(fn, positional, kwarg))
self.parser = parser
def run_cmd(self, config: Config, options: Namespace) -> None:
fn, positional, kwarg = options.cmd
try:
fn(
config,
*[getattr(options, k, None) for k in positional],
**{k: getattr(options, k, None) for k in kwarg},
)
except util.CommandError as e:
if options.raiseerr:
raise
else:
util.err(str(e), **config.messaging_opts)
def main(self, argv: Optional[Sequence[str]] = None) -> None:
options = self.parser.parse_args(argv)
if not hasattr(options, "cmd"):
# see http://bugs.python.org/issue9253, argparse
# behavior changed incompatibly in py3.3
self.parser.error("too few arguments")
else:
cfg = Config(
file_=options.config,
ini_section=options.name,
cmd_opts=options,
)
self.run_cmd(cfg, options)
def main(
argv: Optional[Sequence[str]] = None,
prog: Optional[str] = None,
**kwargs: Any,
) -> None:
"""The console runner function for Alembic."""
CommandLine(prog=prog).main(argv=argv)
if __name__ == "__main__":
main()
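# Illustrative sketch (not part of the Alembic source): the same commands
# the CLI exposes can be driven programmatically through a Config object.
# Assumes a project already initialized with "alembic init" and a valid
# alembic.ini in the current directory.
def _demo_programmatic_commands() -> None:
    from alembic import command
    cfg = Config("alembic.ini")
    command.upgrade(cfg, "head")
    command.revision(cfg, message="add widgets table", autogenerate=True)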
|
from .runtime.environment import EnvironmentContext
# create proxy functions for
# each method on the EnvironmentContext class.
EnvironmentContext.create_module_class_proxy(globals(), locals())
|
from .runtime.environment import * # noqa
|
from .runtime.migration import * # noqa
|
from .operations.base import Operations
# create proxy functions for
# each method on the Operations class.
Operations.create_module_class_proxy(globals(), locals())
|
from . import context
from . import op
__version__ = "1.15.2"
|
from .config import main
if __name__ == "__main__":
main(prog="alembic")
|
from __future__ import annotations
import contextlib
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import inspect
from . import compare
from . import render
from .. import util
from ..operations import ops
from ..util import sqla_compat
"""Provide the 'autogenerate' feature which can produce migration operations
automatically."""
if TYPE_CHECKING:
from sqlalchemy.engine import Connection
from sqlalchemy.engine import Dialect
from sqlalchemy.engine import Inspector
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import SchemaItem
from sqlalchemy.sql.schema import Table
from ..config import Config
from ..operations.ops import DowngradeOps
from ..operations.ops import MigrationScript
from ..operations.ops import UpgradeOps
from ..runtime.environment import NameFilterParentNames
from ..runtime.environment import NameFilterType
from ..runtime.environment import ProcessRevisionDirectiveFn
from ..runtime.environment import RenderItemFn
from ..runtime.migration import MigrationContext
from ..script.base import Script
from ..script.base import ScriptDirectory
from ..script.revision import _GetRevArg
def compare_metadata(context: MigrationContext, metadata: MetaData) -> Any:
"""Compare a database schema to that given in a
:class:`~sqlalchemy.schema.MetaData` instance.
The database connection is presented in the context
of a :class:`.MigrationContext` object, which
provides database connectivity as well as optional
comparison functions to use for datatypes and
server defaults - see the "autogenerate" arguments
at :meth:`.EnvironmentContext.configure`
for details on these.
The return format is a list of "diff" directives,
each representing individual differences::
from alembic.migration import MigrationContext
from alembic.autogenerate import compare_metadata
from sqlalchemy import (
create_engine,
MetaData,
Column,
Integer,
String,
Table,
text,
)
import pprint
engine = create_engine("sqlite://")
with engine.begin() as conn:
conn.execute(
text(
'''
create table foo (
id integer not null primary key,
old_data varchar,
x integer
)
'''
)
)
conn.execute(text("create table bar (data varchar)"))
metadata = MetaData()
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
Column("x", Integer, nullable=False),
)
Table("bat", metadata, Column("info", String))
mc = MigrationContext.configure(engine.connect())
diff = compare_metadata(mc, metadata)
pprint.pprint(diff, indent=2, width=20)
Output::
[
(
"add_table",
Table(
"bat",
MetaData(),
Column("info", String(), table=<bat>),
schema=None,
),
),
(
"remove_table",
Table(
"bar",
MetaData(),
Column("data", VARCHAR(), table=<bar>),
schema=None,
),
),
(
"add_column",
None,
"foo",
Column("data", Integer(), table=<foo>),
),
[
(
"modify_nullable",
None,
"foo",
"x",
{
"existing_comment": None,
"existing_server_default": False,
"existing_type": INTEGER(),
},
True,
False,
)
],
(
"remove_column",
None,
"foo",
Column("old_data", VARCHAR(), table=<foo>),
),
]
:param context: a :class:`.MigrationContext`
instance.
:param metadata: a :class:`~sqlalchemy.schema.MetaData`
instance.
.. seealso::
:func:`.produce_migrations` - produces a :class:`.MigrationScript`
structure based on metadata comparison.
"""
migration_script = produce_migrations(context, metadata)
assert migration_script.upgrade_ops is not None
return migration_script.upgrade_ops.as_diffs()
def produce_migrations(
context: MigrationContext, metadata: MetaData
) -> MigrationScript:
"""Produce a :class:`.MigrationScript` structure based on schema
comparison.
This function does essentially what :func:`.compare_metadata` does,
but then runs the resulting list of diffs to produce the full
:class:`.MigrationScript` object. For an example of what this looks like,
see the example in :ref:`customizing_revision`.
.. seealso::
:func:`.compare_metadata` - returns more fundamental "diff"
data from comparing a schema.
"""
autogen_context = AutogenContext(context, metadata=metadata)
migration_script = ops.MigrationScript(
rev_id=None,
upgrade_ops=ops.UpgradeOps([]),
downgrade_ops=ops.DowngradeOps([]),
)
compare._populate_migration_script(autogen_context, migration_script)
return migration_script
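# Illustrative sketch (not part of the Alembic source): producing a
# MigrationScript structure from a live connection. The empty MetaData
# stands in for an application's real model metadata.
def _demo_produce_migrations() -> None:
    from sqlalchemy import create_engine, MetaData
    from alembic.migration import MigrationContext
    engine = create_engine("sqlite://")
    metadata = MetaData()  # normally the application's model metadata
    with engine.connect() as conn:
        mc = MigrationContext.configure(conn)
        script = produce_migrations(mc, metadata)
        # the structure can be inspected or rewritten before rendering
        print(script.upgrade_ops.as_diffs())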
def render_python_code(
up_or_down_op: Union[UpgradeOps, DowngradeOps],
sqlalchemy_module_prefix: str = "sa.",
alembic_module_prefix: str = "op.",
render_as_batch: bool = False,
imports: Sequence[str] = (),
render_item: Optional[RenderItemFn] = None,
migration_context: Optional[MigrationContext] = None,
user_module_prefix: Optional[str] = None,
) -> str:
"""Render Python code given an :class:`.UpgradeOps` or
:class:`.DowngradeOps` object.
This is a convenience function that can be used to test the
autogenerate output of a user-defined :class:`.MigrationScript` structure.
:param up_or_down_op: :class:`.UpgradeOps` or :class:`.DowngradeOps` object
:param sqlalchemy_module_prefix: module prefix for SQLAlchemy objects
:param alembic_module_prefix: module prefix for Alembic constructs
:param render_as_batch: use "batch operations" style for rendering
:param imports: sequence of import symbols to add
:param render_item: callable to render items
:param migration_context: optional :class:`.MigrationContext`
:param user_module_prefix: optional string prefix for user-defined types
.. versionadded:: 1.11.0
"""
opts = {
"sqlalchemy_module_prefix": sqlalchemy_module_prefix,
"alembic_module_prefix": alembic_module_prefix,
"render_item": render_item,
"render_as_batch": render_as_batch,
"user_module_prefix": user_module_prefix,
}
if migration_context is None:
from ..runtime.migration import MigrationContext
from sqlalchemy.engine.default import DefaultDialect
migration_context = MigrationContext.configure(
dialect=DefaultDialect()
)
autogen_context = AutogenContext(migration_context, opts=opts)
autogen_context.imports = set(imports)
return render._indent(
render._render_cmd_body(up_or_down_op, autogen_context)
)
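# Illustrative sketch (not part of the Alembic source): rendering the
# upgrade portion of a generated MigrationScript to Python source, e.g.
# when testing a custom process_revision_directives hook.
def _demo_render_python_code() -> None:
    from sqlalchemy import Column, Integer, MetaData, Table, create_engine
    from alembic.migration import MigrationContext
    engine = create_engine("sqlite://")
    metadata = MetaData()
    Table("foo", metadata, Column("id", Integer, primary_key=True))
    with engine.connect() as conn:
        mc = MigrationContext.configure(conn)
        script = produce_migrations(mc, metadata)
        print(render_python_code(script.upgrade_ops))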
def _render_migration_diffs(
context: MigrationContext, template_args: Dict[Any, Any]
) -> None:
"""legacy, used by test_autogen_composition at the moment"""
autogen_context = AutogenContext(context)
upgrade_ops = ops.UpgradeOps([])
compare._produce_net_changes(autogen_context, upgrade_ops)
migration_script = ops.MigrationScript(
rev_id=None,
upgrade_ops=upgrade_ops,
downgrade_ops=upgrade_ops.reverse(),
)
render._render_python_into_templatevars(
autogen_context, migration_script, template_args
)
class AutogenContext:
"""Maintains configuration and state that's specific to an
autogenerate operation."""
metadata: Union[MetaData, Sequence[MetaData], None] = None
"""The :class:`~sqlalchemy.schema.MetaData` object
representing the destination.
This object is the one that is passed within ``env.py``
to the :paramref:`.EnvironmentContext.configure.target_metadata`
parameter. It represents the structure of :class:`.Table` and other
objects as stated in the current database model, and represents the
destination structure for the database being examined.
While the :class:`~sqlalchemy.schema.MetaData` object is primarily
known as a collection of :class:`~sqlalchemy.schema.Table` objects,
it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary
    that may be used by end-user code to store additional schema-level
    objects to be compared in custom autogeneration schemes.
"""
connection: Optional[Connection] = None
"""The :class:`~sqlalchemy.engine.base.Connection` object currently
connected to the database backend being compared.
This is obtained from the :attr:`.MigrationContext.bind` and is
ultimately set up in the ``env.py`` script.
"""
dialect: Optional[Dialect] = None
"""The :class:`~sqlalchemy.engine.Dialect` object currently in use.
This is normally obtained from the
:attr:`~sqlalchemy.engine.base.Connection.dialect` attribute.
"""
imports: Set[str] = None # type: ignore[assignment]
"""A ``set()`` which contains string Python import directives.
The directives are to be rendered into the ``${imports}`` section
of a script template. The set is normally empty and can be modified
within hooks such as the
:paramref:`.EnvironmentContext.configure.render_item` hook.
.. seealso::
:ref:`autogen_render_types`
"""
migration_context: MigrationContext = None # type: ignore[assignment]
"""The :class:`.MigrationContext` established by the ``env.py`` script."""
def __init__(
self,
migration_context: MigrationContext,
metadata: Union[MetaData, Sequence[MetaData], None] = None,
opts: Optional[Dict[str, Any]] = None,
autogenerate: bool = True,
) -> None:
if (
autogenerate
and migration_context is not None
and migration_context.as_sql
):
raise util.CommandError(
"autogenerate can't use as_sql=True as it prevents querying "
"the database for schema information"
)
if opts is None:
opts = migration_context.opts
self.metadata = metadata = (
opts.get("target_metadata", None) if metadata is None else metadata
)
if (
autogenerate
and metadata is None
and migration_context is not None
and migration_context.script is not None
):
raise util.CommandError(
"Can't proceed with --autogenerate option; environment "
"script %s does not provide "
"a MetaData object or sequence of objects to the context."
% (migration_context.script.env_py_location)
)
include_object = opts.get("include_object", None)
include_name = opts.get("include_name", None)
object_filters = []
name_filters = []
if include_object:
object_filters.append(include_object)
if include_name:
name_filters.append(include_name)
self._object_filters = object_filters
self._name_filters = name_filters
self.migration_context = migration_context
if self.migration_context is not None:
self.connection = self.migration_context.bind
self.dialect = self.migration_context.dialect
self.imports = set()
self.opts: Dict[str, Any] = opts
self._has_batch: bool = False
@util.memoized_property
def inspector(self) -> Inspector:
if self.connection is None:
raise TypeError(
"can't return inspector as this "
"AutogenContext has no database connection"
)
return inspect(self.connection)
@contextlib.contextmanager
def _within_batch(self) -> Iterator[None]:
self._has_batch = True
yield
self._has_batch = False
def run_name_filters(
self,
name: Optional[str],
type_: NameFilterType,
parent_names: NameFilterParentNames,
) -> bool:
"""Run the context's name filters and return True if the targets
should be part of the autogenerate operation.
This method should be run for every kind of name encountered within the
reflection side of an autogenerate operation, giving the environment
the chance to filter what names should be reflected as database
objects. The filters here are produced directly via the
:paramref:`.EnvironmentContext.configure.include_name` parameter.
"""
if "schema_name" in parent_names:
if type_ == "table":
table_name = name
else:
table_name = parent_names.get("table_name", None)
if table_name:
schema_name = parent_names["schema_name"]
if schema_name:
parent_names["schema_qualified_table_name"] = "%s.%s" % (
schema_name,
table_name,
)
else:
parent_names["schema_qualified_table_name"] = table_name
for fn in self._name_filters:
if not fn(name, type_, parent_names):
return False
else:
return True
def run_object_filters(
self,
object_: SchemaItem,
name: sqla_compat._ConstraintName,
type_: NameFilterType,
reflected: bool,
compare_to: Optional[SchemaItem],
) -> bool:
"""Run the context's object filters and return True if the targets
should be part of the autogenerate operation.
This method should be run for every kind of object encountered within
an autogenerate operation, giving the environment the chance
to filter what objects should be included in the comparison.
The filters here are produced directly via the
:paramref:`.EnvironmentContext.configure.include_object` parameter.
"""
for fn in self._object_filters:
if not fn(object_, name, type_, reflected, compare_to):
return False
else:
return True
run_filters = run_object_filters
@util.memoized_property
def sorted_tables(self) -> List[Table]:
"""Return an aggregate of the :attr:`.MetaData.sorted_tables`
collection(s).
For a sequence of :class:`.MetaData` objects, this
concatenates the :attr:`.MetaData.sorted_tables` collection
for each individual :class:`.MetaData` in the order of the
sequence. It does **not** collate the sorted tables collections.
"""
result = []
for m in util.to_list(self.metadata):
result.extend(m.sorted_tables)
return result
@util.memoized_property
def table_key_to_table(self) -> Dict[str, Table]:
"""Return an aggregate of the :attr:`.MetaData.tables` dictionaries.
The :attr:`.MetaData.tables` collection is a dictionary of table key
to :class:`.Table`; this method aggregates the dictionary across
multiple :class:`.MetaData` objects into one dictionary.
Duplicate table keys are **not** supported; if two :class:`.MetaData`
objects contain the same table key, an exception is raised.
"""
result: Dict[str, Table] = {}
for m in util.to_list(self.metadata):
intersect = set(result).intersection(set(m.tables))
if intersect:
raise ValueError(
"Duplicate table keys across multiple "
"MetaData objects: %s"
% (", ".join('"%s"' % key for key in sorted(intersect)))
)
result.update(m.tables)
return result
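# Illustrative sketch (not part of the Alembic source): target_metadata
# may be a sequence of MetaData objects; sorted_tables and
# table_key_to_table aggregate across all of them, and duplicate table
# keys raise ValueError. AutogenContext is constructed directly here
# only for illustration; normally it is created internally.
def _demo_multiple_metadata() -> None:
    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.engine.default import DefaultDialect
    from alembic.runtime.migration import MigrationContext
    m1, m2 = MetaData(), MetaData()
    Table("a", m1, Column("id", Integer))
    Table("b", m2, Column("id", Integer))
    mc = MigrationContext.configure(dialect=DefaultDialect())
    ctx = AutogenContext(mc, metadata=[m1, m2])
    print([t.name for t in ctx.sorted_tables])  # ["a", "b"]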
class RevisionContext:
"""Maintains configuration and state that's specific to a revision
file generation operation."""
generated_revisions: List[MigrationScript]
process_revision_directives: Optional[ProcessRevisionDirectiveFn]
def __init__(
self,
config: Config,
script_directory: ScriptDirectory,
command_args: Dict[str, Any],
process_revision_directives: Optional[
ProcessRevisionDirectiveFn
] = None,
) -> None:
self.config = config
self.script_directory = script_directory
self.command_args = command_args
self.process_revision_directives = process_revision_directives
        self.template_args = {
            # let templates use the config, e.g. for multiple databases
            "config": config
        }
self.generated_revisions = [self._default_revision()]
def _to_script(
self, migration_script: MigrationScript
) -> Optional[Script]:
template_args: Dict[str, Any] = self.template_args.copy()
if getattr(migration_script, "_needs_render", False):
autogen_context = self._last_autogen_context
# clear out existing imports if we are doing multiple
# renders
autogen_context.imports = set()
if migration_script.imports:
autogen_context.imports.update(migration_script.imports)
render._render_python_into_templatevars(
autogen_context, migration_script, template_args
)
assert migration_script.rev_id is not None
return self.script_directory.generate_revision(
migration_script.rev_id,
migration_script.message,
refresh=True,
head=migration_script.head,
splice=migration_script.splice,
branch_labels=migration_script.branch_label,
version_path=migration_script.version_path,
depends_on=migration_script.depends_on,
**template_args,
)
def run_autogenerate(
self, rev: _GetRevArg, migration_context: MigrationContext
) -> None:
self._run_environment(rev, migration_context, True)
def run_no_autogenerate(
self, rev: _GetRevArg, migration_context: MigrationContext
) -> None:
self._run_environment(rev, migration_context, False)
def _run_environment(
self,
rev: _GetRevArg,
migration_context: MigrationContext,
autogenerate: bool,
) -> None:
if autogenerate:
if self.command_args["sql"]:
raise util.CommandError(
"Using --sql with --autogenerate does not make any sense"
)
if set(self.script_directory.get_revisions(rev)) != set(
self.script_directory.get_revisions("heads")
):
raise util.CommandError("Target database is not up to date.")
upgrade_token = migration_context.opts["upgrade_token"]
downgrade_token = migration_context.opts["downgrade_token"]
migration_script = self.generated_revisions[-1]
if not getattr(migration_script, "_needs_render", False):
migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token
migration_script.downgrade_ops_list[-1].downgrade_token = (
downgrade_token
)
migration_script._needs_render = True
else:
migration_script._upgrade_ops.append(
ops.UpgradeOps([], upgrade_token=upgrade_token)
)
migration_script._downgrade_ops.append(
ops.DowngradeOps([], downgrade_token=downgrade_token)
)
autogen_context = AutogenContext(
migration_context, autogenerate=autogenerate
)
self._last_autogen_context: AutogenContext = autogen_context
if autogenerate:
compare._populate_migration_script(
autogen_context, migration_script
)
if self.process_revision_directives:
self.process_revision_directives(
migration_context, rev, self.generated_revisions
)
hook = migration_context.opts["process_revision_directives"]
if hook:
hook(migration_context, rev, self.generated_revisions)
for migration_script in self.generated_revisions:
migration_script._needs_render = True
def _default_revision(self) -> MigrationScript:
command_args: Dict[str, Any] = self.command_args
op = ops.MigrationScript(
rev_id=command_args["rev_id"] or util.rev_id(),
message=command_args["message"],
upgrade_ops=ops.UpgradeOps([]),
downgrade_ops=ops.DowngradeOps([]),
head=command_args["head"],
splice=command_args["splice"],
branch_label=command_args["branch_label"],
version_path=command_args["version_path"],
depends_on=command_args["depends_on"],
)
return op
def generate_scripts(self) -> Iterator[Optional[Script]]:
for generated_revision in self.generated_revisions:
yield self._to_script(generated_revision)
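# Illustrative sketch (not part of the Alembic source): the filters
# consumed by run_name_filters() / run_object_filters() are supplied
# from env.py. This hypothetical fragment skips a "legacy" schema and
# any table flagged in its info dict.
def _demo_env_py_filters() -> None:
    def include_name(name, type_, parent_names):
        if type_ == "schema":
            return name != "legacy"
        return True
    def include_object(object_, name, type_, reflected, compare_to):
        return not (
            type_ == "table" and object_.info.get("skip_autogenerate")
        )
    # in env.py these would be passed to the environment, e.g.:
    # context.configure(
    #     connection=connection,
    #     target_metadata=target_metadata,
    #     include_name=include_name,
    #     include_object=include_object,
    # )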
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import contextlib
import logging
import re
from typing import Any
from typing import cast
from typing import Dict
from typing import Iterator
from typing import Mapping
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy import event
from sqlalchemy import inspect
from sqlalchemy import schema as sa_schema
from sqlalchemy import text
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import expression
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import UniqueConstraint
from sqlalchemy.util import OrderedSet
from .. import util
from ..ddl._autogen import is_index_sig
from ..ddl._autogen import is_uq_sig
from ..operations import ops
from ..util import sqla_compat
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Table
from alembic.autogenerate.api import AutogenContext
from alembic.ddl.impl import DefaultImpl
from alembic.operations.ops import AlterColumnOp
from alembic.operations.ops import MigrationScript
from alembic.operations.ops import ModifyTableOps
from alembic.operations.ops import UpgradeOps
from ..ddl._autogen import _constraint_sig
log = logging.getLogger(__name__)
def _populate_migration_script(
autogen_context: AutogenContext, migration_script: MigrationScript
) -> None:
upgrade_ops = migration_script.upgrade_ops_list[-1]
downgrade_ops = migration_script.downgrade_ops_list[-1]
_produce_net_changes(autogen_context, upgrade_ops)
upgrade_ops.reverse_into(downgrade_ops)
comparators = util.Dispatcher(uselist=True)
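# Illustrative sketch (not part of the Alembic source): third-party code
# can register extra comparison hooks on this dispatcher (publicly
# importable as alembic.autogenerate.comparators). A "table"-level hook
# receives the same arguments as the built-in comparators below; this
# one is a deliberate no-op.
@comparators.dispatch_for("table")
def _demo_custom_table_comparator(
    autogen_context, modify_table_ops, schema, tname,
    conn_table, metadata_table,
):
    # a real hook would append operations to modify_table_ops.ops
    pass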
def _produce_net_changes(
autogen_context: AutogenContext, upgrade_ops: UpgradeOps
) -> None:
connection = autogen_context.connection
assert connection is not None
include_schemas = autogen_context.opts.get("include_schemas", False)
inspector: Inspector = inspect(connection)
default_schema = connection.dialect.default_schema_name
schemas: Set[Optional[str]]
if include_schemas:
schemas = set(inspector.get_schema_names())
        # drop the dialect-internal "information_schema" schema
        schemas.discard("information_schema")
        # replace the default schema with None
        schemas.discard(default_schema)
schemas.add(None)
else:
schemas = {None}
schemas = {
s for s in schemas if autogen_context.run_name_filters(s, "schema", {})
}
assert autogen_context.dialect is not None
comparators.dispatch("schema", autogen_context.dialect.name)(
autogen_context, upgrade_ops, schemas
)
@comparators.dispatch_for("schema")
def _autogen_for_tables(
autogen_context: AutogenContext,
upgrade_ops: UpgradeOps,
schemas: Union[Set[None], Set[Optional[str]]],
) -> None:
inspector = autogen_context.inspector
conn_table_names: Set[Tuple[Optional[str], str]] = set()
version_table_schema = (
autogen_context.migration_context.version_table_schema
)
version_table = autogen_context.migration_context.version_table
for schema_name in schemas:
tables = set(inspector.get_table_names(schema=schema_name))
if schema_name == version_table_schema:
            tables = tables.difference([version_table])
conn_table_names.update(
(schema_name, tname)
for tname in tables
if autogen_context.run_name_filters(
tname, "table", {"schema_name": schema_name}
)
)
metadata_table_names = OrderedSet(
[(table.schema, table.name) for table in autogen_context.sorted_tables]
).difference([(version_table_schema, version_table)])
_compare_tables(
conn_table_names,
metadata_table_names,
inspector,
upgrade_ops,
autogen_context,
)
def _compare_tables(
conn_table_names: set,
metadata_table_names: set,
inspector: Inspector,
upgrade_ops: UpgradeOps,
autogen_context: AutogenContext,
) -> None:
default_schema = inspector.bind.dialect.default_schema_name
# tables coming from the connection will not have "schema"
# set if it matches default_schema_name; so we need a list
# of table names from local metadata that also have "None" if schema
# == default_schema_name. Most setups will be like this anyway but
# some are not (see #170)
metadata_table_names_no_dflt_schema = OrderedSet(
[
(schema if schema != default_schema else None, tname)
for schema, tname in metadata_table_names
]
)
# to adjust for the MetaData collection storing the tables either
# as "schemaname.tablename" or just "tablename", create a new lookup
# which will match the "non-default-schema" keys to the Table object.
tname_to_table = {
no_dflt_schema: autogen_context.table_key_to_table[
sa_schema._get_table_key(tname, schema)
]
for no_dflt_schema, (schema, tname) in zip(
metadata_table_names_no_dflt_schema, metadata_table_names
)
}
metadata_table_names = metadata_table_names_no_dflt_schema
for s, tname in metadata_table_names.difference(conn_table_names):
name = "%s.%s" % (s, tname) if s else tname
metadata_table = tname_to_table[(s, tname)]
if autogen_context.run_object_filters(
metadata_table, tname, "table", False, None
):
upgrade_ops.ops.append(
ops.CreateTableOp.from_table(metadata_table)
)
log.info("Detected added table %r", name)
modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
comparators.dispatch("table")(
autogen_context,
modify_table_ops,
s,
tname,
None,
metadata_table,
)
if not modify_table_ops.is_empty():
upgrade_ops.ops.append(modify_table_ops)
removal_metadata = sa_schema.MetaData()
for s, tname in conn_table_names.difference(metadata_table_names):
name = sa_schema._get_table_key(tname, s)
exists = name in removal_metadata.tables
t = sa_schema.Table(tname, removal_metadata, schema=s)
if not exists:
event.listen(
t,
"column_reflect",
# fmt: off
                autogen_context.migration_context.impl.
                _compat_autogen_column_reflect(inspector),
# fmt: on
)
inspector.reflect_table(t, include_columns=None)
if autogen_context.run_object_filters(t, tname, "table", True, None):
modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
comparators.dispatch("table")(
autogen_context, modify_table_ops, s, tname, t, None
)
if not modify_table_ops.is_empty():
upgrade_ops.ops.append(modify_table_ops)
upgrade_ops.ops.append(ops.DropTableOp.from_table(t))
log.info("Detected removed table %r", name)
existing_tables = conn_table_names.intersection(metadata_table_names)
existing_metadata = sa_schema.MetaData()
conn_column_info = {}
for s, tname in existing_tables:
name = sa_schema._get_table_key(tname, s)
exists = name in existing_metadata.tables
t = sa_schema.Table(tname, existing_metadata, schema=s)
if not exists:
event.listen(
t,
"column_reflect",
# fmt: off
autogen_context.migration_context.impl.
_compat_autogen_column_reflect(inspector),
# fmt: on
)
inspector.reflect_table(t, include_columns=None)
conn_column_info[(s, tname)] = t
for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])):
s = s or None
name = "%s.%s" % (s, tname) if s else tname
metadata_table = tname_to_table[(s, tname)]
conn_table = existing_metadata.tables[name]
if autogen_context.run_object_filters(
metadata_table, tname, "table", False, conn_table
):
modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
with _compare_columns(
s,
tname,
conn_table,
metadata_table,
modify_table_ops,
autogen_context,
inspector,
):
comparators.dispatch("table")(
autogen_context,
modify_table_ops,
s,
tname,
conn_table,
metadata_table,
)
if not modify_table_ops.is_empty():
upgrade_ops.ops.append(modify_table_ops)
_IndexColumnSortingOps: Mapping[str, Any] = util.immutabledict(
{
"asc": expression.asc,
"desc": expression.desc,
"nulls_first": expression.nullsfirst,
"nulls_last": expression.nullslast,
"nullsfirst": expression.nullsfirst, # 1_3 name
"nullslast": expression.nullslast, # 1_3 name
}
)
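# Illustrative sketch (not part of the Alembic source): how reflected
# "column_sorting" entries are applied by _make_index() below, e.g.
# ("desc", "nulls_last") wraps a column as DESC NULLS LAST.
def _demo_column_sorting() -> None:
    from sqlalchemy import Column, Integer
    item = Column("x", Integer)
    for operator in ("desc", "nulls_last"):
        item = _IndexColumnSortingOps[operator](item)
    print(item)  # "x DESC NULLS LAST"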
def _make_index(
impl: DefaultImpl, params: Dict[str, Any], conn_table: Table
) -> Optional[Index]:
exprs: list[Union[Column[Any], TextClause]] = []
sorting = params.get("column_sorting")
for num, col_name in enumerate(params["column_names"]):
item: Union[Column[Any], TextClause]
if col_name is None:
assert "expressions" in params
name = params["expressions"][num]
item = text(name)
else:
name = col_name
item = conn_table.c[col_name]
if sorting and name in sorting:
for operator in sorting[name]:
if operator in _IndexColumnSortingOps:
item = _IndexColumnSortingOps[operator](item)
exprs.append(item)
ix = sa_schema.Index(
params["name"],
*exprs,
unique=params["unique"],
_table=conn_table,
**impl.adjust_reflected_dialect_options(params, "index"),
)
if "duplicates_constraint" in params:
ix.info["duplicates_constraint"] = params["duplicates_constraint"]
return ix
def _make_unique_constraint(
impl: DefaultImpl, params: Dict[str, Any], conn_table: Table
) -> UniqueConstraint:
uq = sa_schema.UniqueConstraint(
*[conn_table.c[cname] for cname in params["column_names"]],
name=params["name"],
**impl.adjust_reflected_dialect_options(params, "unique_constraint"),
)
if "duplicates_index" in params:
uq.info["duplicates_index"] = params["duplicates_index"]
return uq
def _make_foreign_key(
params: Dict[str, Any], conn_table: Table
) -> ForeignKeyConstraint:
tname = params["referred_table"]
if params["referred_schema"]:
tname = "%s.%s" % (params["referred_schema"], tname)
options = params.get("options", {})
const = sa_schema.ForeignKeyConstraint(
[conn_table.c[cname] for cname in params["constrained_columns"]],
["%s.%s" % (tname, n) for n in params["referred_columns"]],
onupdate=options.get("onupdate"),
ondelete=options.get("ondelete"),
deferrable=options.get("deferrable"),
initially=options.get("initially"),
name=params["name"],
)
    # explicitly associate the constraint with the table (needed by
    # SQLAlchemy 0.7)
conn_table.append_constraint(const)
return const
@contextlib.contextmanager
def _compare_columns(
schema: Optional[str],
tname: Union[quoted_name, str],
conn_table: Table,
metadata_table: Table,
modify_table_ops: ModifyTableOps,
autogen_context: AutogenContext,
inspector: Inspector,
) -> Iterator[None]:
name = "%s.%s" % (schema, tname) if schema else tname
metadata_col_names = OrderedSet(
c.name for c in metadata_table.c if not c.system
)
metadata_cols_by_name = {
c.name: c for c in metadata_table.c if not c.system
}
conn_col_names = {
c.name: c
for c in conn_table.c
if autogen_context.run_name_filters(
c.name, "column", {"table_name": tname, "schema_name": schema}
)
}
for cname in metadata_col_names.difference(conn_col_names):
if autogen_context.run_object_filters(
metadata_cols_by_name[cname], cname, "column", False, None
):
modify_table_ops.ops.append(
ops.AddColumnOp.from_column_and_tablename(
schema, tname, metadata_cols_by_name[cname]
)
)
log.info("Detected added column '%s.%s'", name, cname)
for colname in metadata_col_names.intersection(conn_col_names):
metadata_col = metadata_cols_by_name[colname]
conn_col = conn_table.c[colname]
if not autogen_context.run_object_filters(
metadata_col, colname, "column", False, conn_col
):
continue
alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema)
comparators.dispatch("column")(
autogen_context,
alter_column_op,
schema,
tname,
colname,
conn_col,
metadata_col,
)
if alter_column_op.has_changes():
modify_table_ops.ops.append(alter_column_op)
yield
for cname in set(conn_col_names).difference(metadata_col_names):
if autogen_context.run_object_filters(
conn_table.c[cname], cname, "column", True, None
):
modify_table_ops.ops.append(
ops.DropColumnOp.from_column_and_tablename(
schema, tname, conn_table.c[cname]
)
)
log.info("Detected removed column '%s.%s'", name, cname)
_C = TypeVar("_C", bound=Union[UniqueConstraint, ForeignKeyConstraint, Index])
@comparators.dispatch_for("table")
def _compare_indexes_and_uniques(
autogen_context: AutogenContext,
modify_ops: ModifyTableOps,
schema: Optional[str],
tname: Union[quoted_name, str],
conn_table: Optional[Table],
metadata_table: Optional[Table],
) -> None:
inspector = autogen_context.inspector
is_create_table = conn_table is None
is_drop_table = metadata_table is None
impl = autogen_context.migration_context.impl
# 1a. get raw indexes and unique constraints from metadata ...
if metadata_table is not None:
metadata_unique_constraints = {
uq
for uq in metadata_table.constraints
if isinstance(uq, sa_schema.UniqueConstraint)
}
metadata_indexes = set(metadata_table.indexes)
else:
metadata_unique_constraints = set()
metadata_indexes = set()
conn_uniques = conn_indexes = frozenset() # type:ignore[var-annotated]
supports_unique_constraints = False
unique_constraints_duplicate_unique_indexes = False
if conn_table is not None:
# 1b. ... and from connection, if the table exists
try:
conn_uniques = inspector.get_unique_constraints( # type:ignore[assignment] # noqa
tname, schema=schema
)
supports_unique_constraints = True
except NotImplementedError:
pass
except TypeError:
# number of arguments is off for the base
# method in SQLAlchemy due to the cache decorator
# not being present
pass
else:
conn_uniques = [ # type:ignore[assignment]
uq
for uq in conn_uniques
if autogen_context.run_name_filters(
uq["name"],
"unique_constraint",
{"table_name": tname, "schema_name": schema},
)
]
for uq in conn_uniques:
if uq.get("duplicates_index"):
unique_constraints_duplicate_unique_indexes = True
try:
conn_indexes = inspector.get_indexes( # type:ignore[assignment]
tname, schema=schema
)
except NotImplementedError:
pass
else:
conn_indexes = [ # type:ignore[assignment]
ix
for ix in conn_indexes
if autogen_context.run_name_filters(
ix["name"],
"index",
{"table_name": tname, "schema_name": schema},
)
]
# 2. convert conn-level objects from raw inspector records
# into schema objects
if is_drop_table:
# for DROP TABLE uniques are inline, don't need them
conn_uniques = set() # type:ignore[assignment]
else:
conn_uniques = { # type:ignore[assignment]
_make_unique_constraint(impl, uq_def, conn_table)
for uq_def in conn_uniques
}
conn_indexes = { # type:ignore[assignment]
index
for index in (
_make_index(impl, ix, conn_table) for ix in conn_indexes
)
if index is not None
}
# 2a. if the dialect dupes unique indexes as unique constraints
    # (MySQL and Oracle), correct for that
if unique_constraints_duplicate_unique_indexes:
_correct_for_uq_duplicates_uix(
conn_uniques,
conn_indexes,
metadata_unique_constraints,
metadata_indexes,
autogen_context.dialect,
impl,
)
# 3. give the dialect a chance to omit indexes and constraints that
# we know are either added implicitly by the DB or that the DB
# can't accurately report on
impl.correct_for_autogen_constraints(
conn_uniques, # type: ignore[arg-type]
conn_indexes, # type: ignore[arg-type]
metadata_unique_constraints,
metadata_indexes,
)
# 4. organize the constraints into "signature" collections, the
# _constraint_sig() objects provide a consistent facade over both
# Index and UniqueConstraint so we can easily work with them
# interchangeably
metadata_unique_constraints_sig = {
impl._create_metadata_constraint_sig(uq)
for uq in metadata_unique_constraints
}
metadata_indexes_sig = {
impl._create_metadata_constraint_sig(ix) for ix in metadata_indexes
}
conn_unique_constraints = {
impl._create_reflected_constraint_sig(uq) for uq in conn_uniques
}
conn_indexes_sig = {
impl._create_reflected_constraint_sig(ix) for ix in conn_indexes
}
# 5. index things by name, for those objects that have names
metadata_names = {
cast(str, c.md_name_to_sql_name(autogen_context)): c
for c in metadata_unique_constraints_sig.union(metadata_indexes_sig)
if c.is_named
}
conn_uniques_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig]
conn_indexes_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig]
conn_uniques_by_name = {c.name: c for c in conn_unique_constraints}
conn_indexes_by_name = {c.name: c for c in conn_indexes_sig}
conn_names = {
c.name: c
for c in conn_unique_constraints.union(conn_indexes_sig)
if sqla_compat.constraint_name_string(c.name)
}
doubled_constraints = {
name: (conn_uniques_by_name[name], conn_indexes_by_name[name])
for name in set(conn_uniques_by_name).intersection(
conn_indexes_by_name
)
}
# 6. index things by "column signature", to help with unnamed unique
# constraints.
conn_uniques_by_sig = {uq.unnamed: uq for uq in conn_unique_constraints}
metadata_uniques_by_sig = {
uq.unnamed: uq for uq in metadata_unique_constraints_sig
}
unnamed_metadata_uniques = {
uq.unnamed: uq
for uq in metadata_unique_constraints_sig
if not sqla_compat._constraint_is_named(
uq.const, autogen_context.dialect
)
}
# assumptions:
# 1. a unique constraint or an index from the connection *always*
# has a name.
# 2. an index on the metadata side *always* has a name.
# 3. a unique constraint on the metadata side *might* have a name.
# 4. The backend may double up indexes as unique constraints and
    # vice versa (e.g. MySQL, PostgreSQL)
def obj_added(obj: _constraint_sig):
if is_index_sig(obj):
if autogen_context.run_object_filters(
obj.const, obj.name, "index", False, None
):
modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const))
log.info(
"Detected added index '%r' on '%s'",
obj.name,
obj.column_names,
)
elif is_uq_sig(obj):
if not supports_unique_constraints:
                # can't report unique constraints as added if the
                # backend doesn't reflect them
return
if is_create_table or is_drop_table:
# unique constraints are created inline with table defs
return
if autogen_context.run_object_filters(
obj.const, obj.name, "unique_constraint", False, None
):
modify_ops.ops.append(
ops.AddConstraintOp.from_constraint(obj.const)
)
log.info(
"Detected added unique constraint %r on '%s'",
obj.name,
obj.column_names,
)
else:
assert False
def obj_removed(obj: _constraint_sig):
if is_index_sig(obj):
if obj.is_unique and not supports_unique_constraints:
                # many databases double up unique constraints as unique
                # indexes. without the reflected unique-constraint list
                # we can't be sure what we're doing here
return
if autogen_context.run_object_filters(
obj.const, obj.name, "index", True, None
):
modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const))
log.info("Detected removed index %r on %r", obj.name, tname)
elif is_uq_sig(obj):
if is_create_table or is_drop_table:
# if the whole table is being dropped, we don't need to
# consider unique constraint separately
return
if autogen_context.run_object_filters(
obj.const, obj.name, "unique_constraint", True, None
):
modify_ops.ops.append(
ops.DropConstraintOp.from_constraint(obj.const)
)
log.info(
"Detected removed unique constraint %r on %r",
obj.name,
tname,
)
else:
assert False
def obj_changed(
old: _constraint_sig,
new: _constraint_sig,
msg: str,
):
if is_index_sig(old):
assert is_index_sig(new)
if autogen_context.run_object_filters(
new.const, new.name, "index", False, old.const
):
log.info(
"Detected changed index %r on %r: %s", old.name, tname, msg
)
modify_ops.ops.append(ops.DropIndexOp.from_index(old.const))
modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const))
elif is_uq_sig(old):
assert is_uq_sig(new)
if autogen_context.run_object_filters(
new.const, new.name, "unique_constraint", False, old.const
):
log.info(
"Detected changed unique constraint %r on %r: %s",
old.name,
tname,
msg,
)
modify_ops.ops.append(
ops.DropConstraintOp.from_constraint(old.const)
)
modify_ops.ops.append(
ops.AddConstraintOp.from_constraint(new.const)
)
else:
assert False
for removed_name in sorted(set(conn_names).difference(metadata_names)):
conn_obj = conn_names[removed_name]
if (
is_uq_sig(conn_obj)
and conn_obj.unnamed in unnamed_metadata_uniques
):
continue
elif removed_name in doubled_constraints:
conn_uq, conn_idx = doubled_constraints[removed_name]
if (
all(
conn_idx.unnamed != meta_idx.unnamed
for meta_idx in metadata_indexes_sig
)
and conn_uq.unnamed not in metadata_uniques_by_sig
):
obj_removed(conn_uq)
obj_removed(conn_idx)
else:
obj_removed(conn_obj)
for existing_name in sorted(set(metadata_names).intersection(conn_names)):
metadata_obj = metadata_names[existing_name]
if existing_name in doubled_constraints:
conn_uq, conn_idx = doubled_constraints[existing_name]
if is_index_sig(metadata_obj):
conn_obj = conn_idx
else:
conn_obj = conn_uq
else:
conn_obj = conn_names[existing_name]
if type(conn_obj) != type(metadata_obj):
obj_removed(conn_obj)
obj_added(metadata_obj)
else:
comparison = metadata_obj.compare_to_reflected(conn_obj)
if comparison.is_different:
                # constraints are different
obj_changed(conn_obj, metadata_obj, comparison.message)
elif comparison.is_skip:
                # constraints cannot be compared; skip them
thing = (
"index" if is_index_sig(conn_obj) else "unique constraint"
)
log.info(
"Cannot compare %s %r, assuming equal and skipping. %s",
thing,
conn_obj.name,
comparison.message,
)
else:
                # constraints are equal
assert comparison.is_equal
for added_name in sorted(set(metadata_names).difference(conn_names)):
obj = metadata_names[added_name]
obj_added(obj)
for uq_sig in unnamed_metadata_uniques:
if uq_sig not in conn_uniques_by_sig:
obj_added(unnamed_metadata_uniques[uq_sig])
def _correct_for_uq_duplicates_uix(
conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes,
dialect,
impl,
):
    # dedupe unique indexes vs. constraints, since MySQL / Oracle
    # don't really have unique constraints as a separate construct.
# but look in the metadata and try to maintain constructs
# that already seem to be defined one way or the other
# on that side. This logic was formerly local to MySQL dialect,
# generalized to Oracle and others. See #276
# resolve final rendered name for unique constraints defined in the
    # metadata; this includes truncation of long names. naming-convention
    # names should already be set as cons.name, but leave that decision
    # to sqla_compat.
metadata_cons_names = [
(sqla_compat._get_constraint_final_name(cons, dialect), cons)
for cons in metadata_unique_constraints
]
metadata_uq_names = {
name for name, cons in metadata_cons_names if name is not None
}
unnamed_metadata_uqs = {
impl._create_metadata_constraint_sig(cons).unnamed
for name, cons in metadata_cons_names
if name is None
}
metadata_ix_names = {
sqla_compat._get_constraint_final_name(cons, dialect)
for cons in metadata_indexes
if cons.unique
}
# for reflection side, names are in their final database form
# already since they're from the database
conn_ix_names = {cons.name: cons for cons in conn_indexes if cons.unique}
uqs_dupe_indexes = {
cons.name: cons
for cons in conn_unique_constraints
if cons.info["duplicates_index"]
}
for overlap in uqs_dupe_indexes:
if overlap not in metadata_uq_names:
if (
impl._create_reflected_constraint_sig(
uqs_dupe_indexes[overlap]
).unnamed
not in unnamed_metadata_uqs
):
conn_unique_constraints.discard(uqs_dupe_indexes[overlap])
elif overlap not in metadata_ix_names:
conn_indexes.discard(conn_ix_names[overlap])
@comparators.dispatch_for("column")
def _compare_nullable(
autogen_context: AutogenContext,
alter_column_op: AlterColumnOp,
schema: Optional[str],
tname: Union[quoted_name, str],
cname: Union[quoted_name, str],
conn_col: Column[Any],
metadata_col: Column[Any],
) -> None:
metadata_col_nullable = metadata_col.nullable
conn_col_nullable = conn_col.nullable
alter_column_op.existing_nullable = conn_col_nullable
if conn_col_nullable is not metadata_col_nullable:
if (
sqla_compat._server_default_is_computed(
metadata_col.server_default, conn_col.server_default
)
and sqla_compat._nullability_might_be_unset(metadata_col)
or (
sqla_compat._server_default_is_identity(
metadata_col.server_default, conn_col.server_default
)
)
):
log.info(
"Ignoring nullable change on identity column '%s.%s'",
tname,
cname,
)
else:
alter_column_op.modify_nullable = metadata_col_nullable
log.info(
"Detected %s on column '%s.%s'",
"NULL" if metadata_col_nullable else "NOT NULL",
tname,
cname,
)
@comparators.dispatch_for("column")
def _setup_autoincrement(
autogen_context: AutogenContext,
alter_column_op: AlterColumnOp,
schema: Optional[str],
tname: Union[quoted_name, str],
cname: quoted_name,
conn_col: Column[Any],
metadata_col: Column[Any],
) -> None:
if metadata_col.table._autoincrement_column is metadata_col:
alter_column_op.kw["autoincrement"] = True
elif metadata_col.autoincrement is True:
alter_column_op.kw["autoincrement"] = True
elif metadata_col.autoincrement is False:
alter_column_op.kw["autoincrement"] = False
@comparators.dispatch_for("column")
def _compare_type(
autogen_context: AutogenContext,
alter_column_op: AlterColumnOp,
schema: Optional[str],
tname: Union[quoted_name, str],
cname: Union[quoted_name, str],
conn_col: Column[Any],
metadata_col: Column[Any],
) -> None:
conn_type = conn_col.type
alter_column_op.existing_type = conn_type
metadata_type = metadata_col.type
if conn_type._type_affinity is sqltypes.NullType:
log.info(
"Couldn't determine database type " "for column '%s.%s'",
tname,
cname,
)
return
if metadata_type._type_affinity is sqltypes.NullType:
log.info(
"Column '%s.%s' has no type within " "the model; can't compare",
tname,
cname,
)
return
isdiff = autogen_context.migration_context._compare_type(
conn_col, metadata_col
)
if isdiff:
alter_column_op.modify_type = metadata_type
log.info(
"Detected type change from %r to %r on '%s.%s'",
conn_type,
metadata_type,
tname,
cname,
)
def _render_server_default_for_compare(
metadata_default: Optional[Any], autogen_context: AutogenContext
) -> Optional[str]:
if isinstance(metadata_default, sa_schema.DefaultClause):
if isinstance(metadata_default.arg, str):
metadata_default = metadata_default.arg
else:
metadata_default = str(
metadata_default.arg.compile(
dialect=autogen_context.dialect,
compile_kwargs={"literal_binds": True},
)
)
if isinstance(metadata_default, str):
return metadata_default
else:
return None
def _normalize_computed_default(sqltext: str) -> str:
"""we want to warn if a computed sql expression has changed. however
we don't want false positives and the warning is not that critical.
so filter out most forms of variability from the SQL text.
"""
return re.sub(r"[ \(\)'\"`\[\]\t\r\n]", "", sqltext).lower()
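# Illustrative sketch (not part of the Alembic source): the
# normalization above makes formatting-only variants of a computed
# expression compare equal.
def _demo_normalize_computed_default() -> None:
    a = _normalize_computed_default("foo + 42")
    b = _normalize_computed_default('("FOO"\t+  42)')
    assert a == b == "foo+42"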
def _compare_computed_default(
autogen_context: AutogenContext,
alter_column_op: AlterColumnOp,
schema: Optional[str],
tname: str,
cname: str,
conn_col: Column[Any],
metadata_col: Column[Any],
) -> None:
rendered_metadata_default = str(
cast(sa_schema.Computed, metadata_col.server_default).sqltext.compile(
dialect=autogen_context.dialect,
compile_kwargs={"literal_binds": True},
)
)
    # since we cannot ALTER a computed column, only a crude comparison is
    # done here: syntactical differences are stripped out so that a
    # warning is emitted only for an apparently real change.
rendered_metadata_default = _normalize_computed_default(
rendered_metadata_default
)
if isinstance(conn_col.server_default, sa_schema.Computed):
rendered_conn_default = str(
conn_col.server_default.sqltext.compile(
dialect=autogen_context.dialect,
compile_kwargs={"literal_binds": True},
)
)
if rendered_conn_default is None:
rendered_conn_default = ""
else:
rendered_conn_default = _normalize_computed_default(
rendered_conn_default
)
else:
rendered_conn_default = ""
if rendered_metadata_default != rendered_conn_default:
_warn_computed_not_supported(tname, cname)
def _warn_computed_not_supported(tname: str, cname: str) -> None:
util.warn("Computed default on %s.%s cannot be modified" % (tname, cname))
def _compare_identity_default(
autogen_context,
alter_column_op,
schema,
tname,
cname,
conn_col,
metadata_col,
):
impl = autogen_context.migration_context.impl
diff, ignored_attr, is_alter = impl._compare_identity_default(
metadata_col.server_default, conn_col.server_default
)
return diff, is_alter
@comparators.dispatch_for("column")
def _compare_server_default(
autogen_context: AutogenContext,
alter_column_op: AlterColumnOp,
schema: Optional[str],
tname: Union[quoted_name, str],
cname: Union[quoted_name, str],
conn_col: Column[Any],
metadata_col: Column[Any],
) -> Optional[bool]:
metadata_default = metadata_col.server_default
conn_col_default = conn_col.server_default
if conn_col_default is None and metadata_default is None:
return False
if sqla_compat._server_default_is_computed(metadata_default):
return _compare_computed_default( # type:ignore[func-returns-value]
autogen_context,
alter_column_op,
schema,
tname,
cname,
conn_col,
metadata_col,
)
if sqla_compat._server_default_is_computed(conn_col_default):
_warn_computed_not_supported(tname, cname)
return False
if sqla_compat._server_default_is_identity(
metadata_default, conn_col_default
):
alter_column_op.existing_server_default = conn_col_default
diff, is_alter = _compare_identity_default(
autogen_context,
alter_column_op,
schema,
tname,
cname,
conn_col,
metadata_col,
)
if is_alter:
alter_column_op.modify_server_default = metadata_default
if diff:
log.info(
"Detected server default on column '%s.%s': "
"identity options attributes %s",
tname,
cname,
sorted(diff),
)
else:
rendered_metadata_default = _render_server_default_for_compare(
metadata_default, autogen_context
)
rendered_conn_default = (
cast(Any, conn_col_default).arg.text if conn_col_default else None
)
alter_column_op.existing_server_default = conn_col_default
is_diff = autogen_context.migration_context._compare_server_default(
conn_col,
metadata_col,
rendered_metadata_default,
rendered_conn_default,
)
if is_diff:
alter_column_op.modify_server_default = metadata_default
log.info("Detected server default on column '%s.%s'", tname, cname)
return None
@comparators.dispatch_for("column")
def _compare_column_comment(
autogen_context: AutogenContext,
alter_column_op: AlterColumnOp,
schema: Optional[str],
tname: Union[quoted_name, str],
cname: quoted_name,
conn_col: Column[Any],
metadata_col: Column[Any],
) -> Optional[Literal[False]]:
assert autogen_context.dialect is not None
if not autogen_context.dialect.supports_comments:
return None
metadata_comment = metadata_col.comment
conn_col_comment = conn_col.comment
if conn_col_comment is None and metadata_comment is None:
return False
alter_column_op.existing_comment = conn_col_comment
if conn_col_comment != metadata_comment:
alter_column_op.modify_comment = metadata_comment
log.info("Detected column comment '%s.%s'", tname, cname)
return None
@comparators.dispatch_for("table")
def _compare_foreign_keys(
autogen_context: AutogenContext,
modify_table_ops: ModifyTableOps,
schema: Optional[str],
tname: Union[quoted_name, str],
conn_table: Table,
metadata_table: Table,
) -> None:
# if we're doing CREATE TABLE, all FKs are created
# inline within the table def
if conn_table is None or metadata_table is None:
return
inspector = autogen_context.inspector
metadata_fks = {
fk
for fk in metadata_table.constraints
if isinstance(fk, sa_schema.ForeignKeyConstraint)
}
conn_fks_list = [
fk
for fk in inspector.get_foreign_keys(tname, schema=schema)
if autogen_context.run_name_filters(
fk["name"],
"foreign_key_constraint",
{"table_name": tname, "schema_name": schema},
)
]
conn_fks = {
_make_foreign_key(const, conn_table) # type: ignore[arg-type]
for const in conn_fks_list
}
impl = autogen_context.migration_context.impl
# give the dialect a chance to correct the FKs to match more
# closely
autogen_context.migration_context.impl.correct_for_autogen_foreignkeys(
conn_fks, metadata_fks
)
metadata_fks_sig = {
impl._create_metadata_constraint_sig(fk) for fk in metadata_fks
}
conn_fks_sig = {
impl._create_reflected_constraint_sig(fk) for fk in conn_fks
}
# check if reflected FKs include options, indicating the backend
# can reflect FK options
if conn_fks_list and "options" in conn_fks_list[0]:
conn_fks_by_sig = {c.unnamed: c for c in conn_fks_sig}
metadata_fks_by_sig = {c.unnamed: c for c in metadata_fks_sig}
else:
# otherwise compare by sig without options added
conn_fks_by_sig = {c.unnamed_no_options: c for c in conn_fks_sig}
metadata_fks_by_sig = {
c.unnamed_no_options: c for c in metadata_fks_sig
}
metadata_fks_by_name = {
c.name: c for c in metadata_fks_sig if c.name is not None
}
conn_fks_by_name = {c.name: c for c in conn_fks_sig if c.name is not None}
def _add_fk(obj, compare_to):
if autogen_context.run_object_filters(
obj.const, obj.name, "foreign_key_constraint", False, compare_to
):
modify_table_ops.ops.append(
                ops.CreateForeignKeyOp.from_constraint(obj.const)
)
log.info(
"Detected added foreign key (%s)(%s) on table %s%s",
", ".join(obj.source_columns),
", ".join(obj.target_columns),
"%s." % obj.source_schema if obj.source_schema else "",
obj.source_table,
)
def _remove_fk(obj, compare_to):
if autogen_context.run_object_filters(
obj.const, obj.name, "foreign_key_constraint", True, compare_to
):
modify_table_ops.ops.append(
ops.DropConstraintOp.from_constraint(obj.const)
)
log.info(
"Detected removed foreign key (%s)(%s) on table %s%s",
", ".join(obj.source_columns),
", ".join(obj.target_columns),
"%s." % obj.source_schema if obj.source_schema else "",
obj.source_table,
)
# so far it appears we don't need to do this by name at all.
# SQLite doesn't preserve constraint names anyway
for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig):
const = conn_fks_by_sig[removed_sig]
if removed_sig not in metadata_fks_by_sig:
compare_to = (
metadata_fks_by_name[const.name].const
if const.name in metadata_fks_by_name
else None
)
_remove_fk(const, compare_to)
for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig):
const = metadata_fks_by_sig[added_sig]
if added_sig not in conn_fks_by_sig:
compare_to = (
conn_fks_by_name[const.name].const
if const.name in conn_fks_by_name
else None
)
_add_fk(const, compare_to)
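# Illustrative sketch (not part of Alembic): the add/remove detection above
# reduces each FK to a hashable signature, then plain set differences
# decide which constraints to create or drop.
def _demo_fk_sig_diff() -> None:
    conn_sigs = {("t", ("a",), "ref", ("id",))}     # reflected from the DB
    meta_sigs = {("t", ("b",), "ref", ("id",))}     # declared in metadata
    removed = set(conn_sigs).difference(meta_sigs)  # drop these
    added = set(meta_sigs).difference(conn_sigs)    # create these
    print(sorted(removed), sorted(added))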
@comparators.dispatch_for("table")
def _compare_table_comment(
autogen_context: AutogenContext,
modify_table_ops: ModifyTableOps,
schema: Optional[str],
tname: Union[quoted_name, str],
conn_table: Optional[Table],
metadata_table: Optional[Table],
) -> None:
assert autogen_context.dialect is not None
if not autogen_context.dialect.supports_comments:
return
# if we're doing CREATE TABLE, comments will be created inline
# with the create_table op.
if conn_table is None or metadata_table is None:
return
if conn_table.comment is None and metadata_table.comment is None:
return
if metadata_table.comment is None and conn_table.comment is not None:
modify_table_ops.ops.append(
ops.DropTableCommentOp(
tname, existing_comment=conn_table.comment, schema=schema
)
)
elif metadata_table.comment != conn_table.comment:
modify_table_ops.ops.append(
ops.CreateTableCommentOp(
tname,
metadata_table.comment,
existing_comment=conn_table.comment,
schema=schema,
)
)
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
from io import StringIO
import re
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from mako.pygen import PythonPrinter
from sqlalchemy import schema as sa_schema
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from sqlalchemy.sql.elements import conv
from sqlalchemy.sql.elements import Label
from sqlalchemy.sql.elements import quoted_name
from .. import util
from ..operations import ops
from ..util import sqla_compat
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy import Computed
from sqlalchemy import Identity
from sqlalchemy.sql.base import DialectKWArgs
from sqlalchemy.sql.elements import ColumnElement
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import FetchedValue
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import PrimaryKeyConstraint
from sqlalchemy.sql.schema import UniqueConstraint
from sqlalchemy.sql.sqltypes import ARRAY
from sqlalchemy.sql.type_api import TypeEngine
from alembic.autogenerate.api import AutogenContext
from alembic.config import Config
from alembic.operations.ops import MigrationScript
from alembic.operations.ops import ModifyTableOps
MAX_PYTHON_ARGS = 255
def _render_gen_name(
autogen_context: AutogenContext,
name: sqla_compat._ConstraintName,
) -> Optional[Union[quoted_name, str, _f_name]]:
if isinstance(name, conv):
return _f_name(_alembic_autogenerate_prefix(autogen_context), name)
else:
return sqla_compat.constraint_name_or_none(name)
def _indent(text: str) -> str:
text = re.compile(r"^", re.M).sub(" ", text).strip()
text = re.compile(r" +$", re.M).sub("", text)
return text
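# Illustrative, not part of Alembic: _indent() prefixes every line with an
# indent string, then strip() removes the surrounding whitespace, so the
# first line ends up flush (the template token it replaces is already
# indented at its insertion point).
def _demo_indent() -> None:
    body = "op.drop_table('t')\nop.drop_table('u')"
    print(_indent(body))
    # op.drop_table('t')
    #     op.drop_table('u')   (second line indented, first left flush)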
def _render_python_into_templatevars(
autogen_context: AutogenContext,
migration_script: MigrationScript,
template_args: Dict[str, Union[str, Config]],
) -> None:
imports = autogen_context.imports
for upgrade_ops, downgrade_ops in zip(
migration_script.upgrade_ops_list, migration_script.downgrade_ops_list
):
template_args[upgrade_ops.upgrade_token] = _indent(
_render_cmd_body(upgrade_ops, autogen_context)
)
template_args[downgrade_ops.downgrade_token] = _indent(
_render_cmd_body(downgrade_ops, autogen_context)
)
template_args["imports"] = "\n".join(sorted(imports))
default_renderers = renderers = util.Dispatcher()
def _render_cmd_body(
op_container: ops.OpContainer,
autogen_context: AutogenContext,
) -> str:
buf = StringIO()
printer = PythonPrinter(buf)
printer.writeline(
"# ### commands auto generated by Alembic - please adjust! ###"
)
has_lines = False
for op in op_container.ops:
lines = render_op(autogen_context, op)
has_lines = has_lines or bool(lines)
for line in lines:
printer.writeline(line)
if not has_lines:
printer.writeline("pass")
printer.writeline("# ### end Alembic commands ###")
return buf.getvalue()
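# For reference, an empty op container renders as (assumed output per the
# logic above) the banner, a bare "pass" so the generated function still
# compiles, and the closing banner:
#
#     # ### commands auto generated by Alembic - please adjust! ###
#     pass
#     # ### end Alembic commands ###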
def render_op(
autogen_context: AutogenContext, op: ops.MigrateOperation
) -> List[str]:
renderer = renderers.dispatch(op)
lines = util.to_list(renderer(autogen_context, op))
return lines
def render_op_text(
autogen_context: AutogenContext, op: ops.MigrateOperation
) -> str:
return "\n".join(render_op(autogen_context, op))
@renderers.dispatch_for(ops.ModifyTableOps)
def _render_modify_table(
autogen_context: AutogenContext, op: ModifyTableOps
) -> List[str]:
opts = autogen_context.opts
render_as_batch = opts.get("render_as_batch", False)
if op.ops:
lines = []
if render_as_batch:
with autogen_context._within_batch():
lines.append(
"with op.batch_alter_table(%r, schema=%r) as batch_op:"
% (op.table_name, op.schema)
)
for t_op in op.ops:
t_lines = render_op(autogen_context, t_op)
lines.extend(t_lines)
lines.append("")
else:
for t_op in op.ops:
t_lines = render_op(autogen_context, t_op)
lines.extend(t_lines)
return lines
else:
return []
@renderers.dispatch_for(ops.CreateTableCommentOp)
def _render_create_table_comment(
autogen_context: AutogenContext, op: ops.CreateTableCommentOp
) -> str:
if autogen_context._has_batch:
templ = (
"{prefix}create_table_comment(\n"
"{indent}{comment},\n"
"{indent}existing_comment={existing}\n"
")"
)
else:
templ = (
"{prefix}create_table_comment(\n"
"{indent}'{tname}',\n"
"{indent}{comment},\n"
"{indent}existing_comment={existing},\n"
"{indent}schema={schema}\n"
")"
)
return templ.format(
prefix=_alembic_autogenerate_prefix(autogen_context),
tname=op.table_name,
comment="%r" % op.comment if op.comment is not None else None,
existing=(
"%r" % op.existing_comment
if op.existing_comment is not None
else None
),
schema="'%s'" % op.schema if op.schema is not None else None,
indent=" ",
)
@renderers.dispatch_for(ops.DropTableCommentOp)
def _render_drop_table_comment(
autogen_context: AutogenContext, op: ops.DropTableCommentOp
) -> str:
if autogen_context._has_batch:
templ = (
"{prefix}drop_table_comment(\n"
"{indent}existing_comment={existing}\n"
")"
)
else:
templ = (
"{prefix}drop_table_comment(\n"
"{indent}'{tname}',\n"
"{indent}existing_comment={existing},\n"
"{indent}schema={schema}\n"
")"
)
return templ.format(
prefix=_alembic_autogenerate_prefix(autogen_context),
tname=op.table_name,
existing=(
"%r" % op.existing_comment
if op.existing_comment is not None
else None
),
schema="'%s'" % op.schema if op.schema is not None else None,
indent=" ",
)
@renderers.dispatch_for(ops.CreateTableOp)
def _add_table(autogen_context: AutogenContext, op: ops.CreateTableOp) -> str:
table = op.to_table()
args = [
col
for col in [
_render_column(col, autogen_context) for col in table.columns
]
if col
] + sorted(
[
rcons
for rcons in [
_render_constraint(
cons, autogen_context, op._namespace_metadata
)
for cons in table.constraints
]
if rcons is not None
]
)
if len(args) > MAX_PYTHON_ARGS:
args_str = "*[" + ",\n".join(args) + "]"
else:
args_str = ",\n".join(args)
text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % {
"tablename": _ident(op.table_name),
"prefix": _alembic_autogenerate_prefix(autogen_context),
"args": args_str,
}
if op.schema:
text += ",\nschema=%r" % _ident(op.schema)
comment = table.comment
if comment:
text += ",\ncomment=%r" % _ident(comment)
info = table.info
if info:
text += f",\ninfo={info!r}"
for k in sorted(op.kw):
text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k])
if table._prefixes:
prefixes = ", ".join("'%s'" % p for p in table._prefixes)
text += ",\nprefixes=[%s]" % prefixes
if op.if_not_exists is not None:
text += ",\nif_not_exists=%r" % bool(op.if_not_exists)
text += "\n)"
return text
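# Note on MAX_PYTHON_ARGS above: some CPython versions cap explicit call
# arguments at 255, so very wide tables are rendered with a starred list
# instead of bare positional arguments (illustrative rendering):
#
#     op.create_table('t', sa.Column('c0', sa.Integer()), ...)     # narrow
#     op.create_table('t', *[sa.Column('c0', sa.Integer()), ...])  # 255+ args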
@renderers.dispatch_for(ops.DropTableOp)
def _drop_table(autogen_context: AutogenContext, op: ops.DropTableOp) -> str:
text = "%(prefix)sdrop_table(%(tname)r" % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": _ident(op.table_name),
}
if op.schema:
text += ", schema=%r" % _ident(op.schema)
if op.if_exists is not None:
text += ", if_exists=%r" % bool(op.if_exists)
text += ")"
return text
def _render_dialect_kwargs_items(
autogen_context: AutogenContext, item: DialectKWArgs
) -> list[str]:
return [
f"{key}={_render_potential_expr(val, autogen_context)}"
for key, val in item.dialect_kwargs.items()
]
@renderers.dispatch_for(ops.CreateIndexOp)
def _add_index(autogen_context: AutogenContext, op: ops.CreateIndexOp) -> str:
index = op.to_index()
has_batch = autogen_context._has_batch
if has_batch:
tmpl = (
"%(prefix)screate_index(%(name)r, [%(columns)s], "
"unique=%(unique)r%(kwargs)s)"
)
else:
tmpl = (
"%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], "
"unique=%(unique)r%(schema)s%(kwargs)s)"
)
assert index.table is not None
opts = _render_dialect_kwargs_items(autogen_context, index)
if op.if_not_exists is not None:
opts.append("if_not_exists=%r" % bool(op.if_not_exists))
text = tmpl % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"name": _render_gen_name(autogen_context, index.name),
"table": _ident(index.table.name),
"columns": ", ".join(
_get_index_rendered_expressions(index, autogen_context)
),
"unique": index.unique or False,
"schema": (
(", schema=%r" % _ident(index.table.schema))
if index.table.schema
else ""
),
"kwargs": ", " + ", ".join(opts) if opts else "",
}
return text
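# Illustrative renderings produced above (names assumed): outside batch
# mode the table name and schema are emitted; inside batch_alter_table the
# batch op already knows its table.
#
#     op.create_index('ix_t_x', 't', ['x'], unique=False)
#     batch_op.create_index('ix_t_x', ['x'], unique=False)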
@renderers.dispatch_for(ops.DropIndexOp)
def _drop_index(autogen_context: AutogenContext, op: ops.DropIndexOp) -> str:
index = op.to_index()
has_batch = autogen_context._has_batch
if has_batch:
tmpl = "%(prefix)sdrop_index(%(name)r%(kwargs)s)"
else:
tmpl = (
"%(prefix)sdrop_index(%(name)r, "
"table_name=%(table_name)r%(schema)s%(kwargs)s)"
)
opts = _render_dialect_kwargs_items(autogen_context, index)
if op.if_exists is not None:
opts.append("if_exists=%r" % bool(op.if_exists))
text = tmpl % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"name": _render_gen_name(autogen_context, op.index_name),
"table_name": _ident(op.table_name),
"schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""),
"kwargs": ", " + ", ".join(opts) if opts else "",
}
return text
@renderers.dispatch_for(ops.CreateUniqueConstraintOp)
def _add_unique_constraint(
autogen_context: AutogenContext, op: ops.CreateUniqueConstraintOp
) -> List[str]:
return [_uq_constraint(op.to_constraint(), autogen_context, True)]
@renderers.dispatch_for(ops.CreateForeignKeyOp)
def _add_fk_constraint(
autogen_context: AutogenContext, op: ops.CreateForeignKeyOp
) -> str:
args = [repr(_render_gen_name(autogen_context, op.constraint_name))]
if not autogen_context._has_batch:
args.append(repr(_ident(op.source_table)))
args.extend(
[
repr(_ident(op.referent_table)),
repr([_ident(col) for col in op.local_cols]),
repr([_ident(col) for col in op.remote_cols]),
]
)
kwargs = [
"referent_schema",
"onupdate",
"ondelete",
"initially",
"deferrable",
"use_alter",
"match",
]
if not autogen_context._has_batch:
kwargs.insert(0, "source_schema")
for k in kwargs:
if k in op.kw:
value = op.kw[k]
if value is not None:
args.append("%s=%r" % (k, value))
return "%(prefix)screate_foreign_key(%(args)s)" % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"args": ", ".join(args),
}
@renderers.dispatch_for(ops.CreatePrimaryKeyOp)
def _add_pk_constraint(constraint, autogen_context):
raise NotImplementedError()
@renderers.dispatch_for(ops.CreateCheckConstraintOp)
def _add_check_constraint(constraint, autogen_context):
raise NotImplementedError()
@renderers.dispatch_for(ops.DropConstraintOp)
def _drop_constraint(
autogen_context: AutogenContext, op: ops.DropConstraintOp
) -> str:
prefix = _alembic_autogenerate_prefix(autogen_context)
name = _render_gen_name(autogen_context, op.constraint_name)
schema = _ident(op.schema) if op.schema else None
type_ = _ident(op.constraint_type) if op.constraint_type else None
params_strs = []
params_strs.append(repr(name))
if not autogen_context._has_batch:
params_strs.append(repr(_ident(op.table_name)))
if schema is not None:
params_strs.append(f"schema={schema!r}")
if type_ is not None:
params_strs.append(f"type_={type_!r}")
return f"{prefix}drop_constraint({', '.join(params_strs)})"
@renderers.dispatch_for(ops.AddColumnOp)
def _add_column(autogen_context: AutogenContext, op: ops.AddColumnOp) -> str:
schema, tname, column = op.schema, op.table_name, op.column
if autogen_context._has_batch:
template = "%(prefix)sadd_column(%(column)s)"
else:
template = "%(prefix)sadd_column(%(tname)r, %(column)s"
if schema:
template += ", schema=%(schema)r"
template += ")"
text = template % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": tname,
"column": _render_column(column, autogen_context),
"schema": schema,
}
return text
@renderers.dispatch_for(ops.DropColumnOp)
def _drop_column(autogen_context: AutogenContext, op: ops.DropColumnOp) -> str:
schema, tname, column_name = op.schema, op.table_name, op.column_name
if autogen_context._has_batch:
template = "%(prefix)sdrop_column(%(cname)r)"
else:
template = "%(prefix)sdrop_column(%(tname)r, %(cname)r"
if schema:
template += ", schema=%(schema)r"
template += ")"
text = template % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": _ident(tname),
"cname": _ident(column_name),
"schema": _ident(schema),
}
return text
@renderers.dispatch_for(ops.AlterColumnOp)
def _alter_column(
autogen_context: AutogenContext, op: ops.AlterColumnOp
) -> str:
tname = op.table_name
cname = op.column_name
server_default = op.modify_server_default
type_ = op.modify_type
nullable = op.modify_nullable
comment = op.modify_comment
newname = op.modify_name
autoincrement = op.kw.get("autoincrement", None)
existing_type = op.existing_type
existing_nullable = op.existing_nullable
existing_comment = op.existing_comment
existing_server_default = op.existing_server_default
schema = op.schema
indent = " " * 11
if autogen_context._has_batch:
template = "%(prefix)salter_column(%(cname)r"
else:
template = "%(prefix)salter_column(%(tname)r, %(cname)r"
text = template % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": tname,
"cname": cname,
}
if existing_type is not None:
text += ",\n%sexisting_type=%s" % (
indent,
_repr_type(existing_type, autogen_context),
)
if server_default is not False:
rendered = _render_server_default(server_default, autogen_context)
text += ",\n%sserver_default=%s" % (indent, rendered)
if newname is not None:
text += ",\n%snew_column_name=%r" % (indent, newname)
if type_ is not None:
text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context))
if nullable is not None:
text += ",\n%snullable=%r" % (indent, nullable)
if comment is not False:
text += ",\n%scomment=%r" % (indent, comment)
if existing_comment is not None:
text += ",\n%sexisting_comment=%r" % (indent, existing_comment)
if nullable is None and existing_nullable is not None:
text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable)
if autoincrement is not None:
text += ",\n%sautoincrement=%r" % (indent, autoincrement)
if server_default is False and existing_server_default:
rendered = _render_server_default(
existing_server_default, autogen_context
)
text += ",\n%sexisting_server_default=%s" % (indent, rendered)
if schema and not autogen_context._has_batch:
text += ",\n%sschema=%r" % (indent, schema)
text += ")"
return text
class _f_name:
def __init__(self, prefix: str, name: conv) -> None:
self.prefix = prefix
self.name = name
def __repr__(self) -> str:
return "%sf(%r)" % (self.prefix, _ident(self.name))
def _ident(name: Optional[Union[quoted_name, str]]) -> Optional[str]:
"""produce a __repr__() object for a string identifier that may
use quoted_name() in SQLAlchemy 0.9 and greater.
The issue worked around here is that quoted_name() doesn't have
very good repr() behavior by itself when unicode is involved.
"""
if name is None:
return name
elif isinstance(name, quoted_name):
return str(name)
elif isinstance(name, str):
return name
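# Illustrative, not part of Alembic: quoted_name subclasses str but has a
# surprising repr(), so _ident() coerces it to a plain str before the
# renderers apply %r formatting.
def _demo_ident() -> None:
    from sqlalchemy.sql.elements import quoted_name
    assert _ident(quoted_name("MixedCase", quote=True)) == "MixedCase"
    assert _ident("plain") == "plain"
    assert _ident(None) is None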
def _render_potential_expr(
value: Any,
autogen_context: AutogenContext,
*,
wrap_in_element: bool = True,
is_server_default: bool = False,
is_index: bool = False,
) -> str:
if isinstance(value, sql.ClauseElement):
sql_text = autogen_context.migration_context.impl.render_ddl_sql_expr(
value, is_server_default=is_server_default, is_index=is_index
)
if wrap_in_element:
prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
element = "literal_column" if is_index else "text"
value_str = f"{prefix}{element}({sql_text!r})"
if (
is_index
and isinstance(value, Label)
and type(value.name) is str
):
return value_str + f".label({value.name!r})"
else:
return value_str
else:
return repr(sql_text)
else:
return repr(value)
def _get_index_rendered_expressions(
idx: Index, autogen_context: AutogenContext
) -> List[str]:
return [
(
repr(_ident(getattr(exp, "name", None)))
if isinstance(exp, sa_schema.Column)
else _render_potential_expr(exp, autogen_context, is_index=True)
)
for exp in idx.expressions
]
def _uq_constraint(
constraint: UniqueConstraint,
autogen_context: AutogenContext,
alter: bool,
) -> str:
opts: List[Tuple[str, Any]] = []
has_batch = autogen_context._has_batch
if constraint.deferrable:
opts.append(("deferrable", constraint.deferrable))
if constraint.initially:
opts.append(("initially", constraint.initially))
if not has_batch and alter and constraint.table.schema:
opts.append(("schema", _ident(constraint.table.schema)))
if not alter and constraint.name:
opts.append(
("name", _render_gen_name(autogen_context, constraint.name))
)
dialect_options = _render_dialect_kwargs_items(autogen_context, constraint)
if alter:
args = [repr(_render_gen_name(autogen_context, constraint.name))]
if not has_batch:
args += [repr(_ident(constraint.table.name))]
args.append(repr([_ident(col.name) for col in constraint.columns]))
args.extend(["%s=%r" % (k, v) for k, v in opts])
args.extend(dialect_options)
return "%(prefix)screate_unique_constraint(%(args)s)" % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"args": ", ".join(args),
}
else:
args = [repr(_ident(col.name)) for col in constraint.columns]
args.extend(["%s=%r" % (k, v) for k, v in opts])
args.extend(dialect_options)
return "%(prefix)sUniqueConstraint(%(args)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"args": ", ".join(args),
}
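# Illustrative renderings produced above (names assumed):
#
#     op.create_unique_constraint('uq_t_x', 't', ['x'])   # alter=True
#     sa.UniqueConstraint('x', name='uq_t_x')             # alter=False,
#                                                         # inline in a table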
def _user_autogenerate_prefix(autogen_context, target):
prefix = autogen_context.opts["user_module_prefix"]
if prefix is None:
return "%s." % target.__module__
else:
return prefix
def _sqlalchemy_autogenerate_prefix(autogen_context: AutogenContext) -> str:
return autogen_context.opts["sqlalchemy_module_prefix"] or ""
def _alembic_autogenerate_prefix(autogen_context: AutogenContext) -> str:
if autogen_context._has_batch:
return "batch_op."
else:
return autogen_context.opts["alembic_module_prefix"] or ""
def _user_defined_render(
type_: str, object_: Any, autogen_context: AutogenContext
) -> Union[str, Literal[False]]:
if "render_item" in autogen_context.opts:
render = autogen_context.opts["render_item"]
if render:
rendered = render(type_, object_, autogen_context)
if rendered is not False:
return rendered
return False
def _render_column(
column: Column[Any], autogen_context: AutogenContext
) -> str:
rendered = _user_defined_render("column", column, autogen_context)
if rendered is not False:
return rendered
args: List[str] = []
opts: List[Tuple[str, Any]] = []
if column.server_default:
rendered = _render_server_default( # type:ignore[assignment]
column.server_default, autogen_context
)
if rendered:
if _should_render_server_default_positionally(
column.server_default
):
args.append(rendered)
else:
opts.append(("server_default", rendered))
if (
column.autoincrement is not None
and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT
):
opts.append(("autoincrement", column.autoincrement))
if column.nullable is not None:
opts.append(("nullable", column.nullable))
if column.system:
opts.append(("system", column.system))
comment = column.comment
if comment:
opts.append(("comment", "%r" % comment))
# TODO: for non-ascii colname, assign a "key"
return "%(prefix)sColumn(%(name)r, %(type)s, %(args)s%(kwargs)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"name": _ident(column.name),
"type": _repr_type(column.type, autogen_context),
"args": ", ".join([str(arg) for arg in args]) + ", " if args else "",
"kwargs": (
", ".join(
["%s=%s" % (kwname, val) for kwname, val in opts]
+ [
"%s=%s"
% (key, _render_potential_expr(val, autogen_context))
for key, val in column.kwargs.items()
]
)
),
}
def _should_render_server_default_positionally(server_default: Any) -> bool:
return sqla_compat._server_default_is_computed(
server_default
) or sqla_compat._server_default_is_identity(server_default)
def _render_server_default(
default: Optional[
Union[FetchedValue, str, TextClause, ColumnElement[Any]]
],
autogen_context: AutogenContext,
repr_: bool = True,
) -> Optional[str]:
rendered = _user_defined_render("server_default", default, autogen_context)
if rendered is not False:
return rendered
if sqla_compat._server_default_is_computed(default):
return _render_computed(cast("Computed", default), autogen_context)
elif sqla_compat._server_default_is_identity(default):
return _render_identity(cast("Identity", default), autogen_context)
elif isinstance(default, sa_schema.DefaultClause):
if isinstance(default.arg, str):
default = default.arg
else:
return _render_potential_expr(
default.arg, autogen_context, is_server_default=True
)
if isinstance(default, str) and repr_:
default = repr(re.sub(r"^'|'$", "", default))
return cast(str, default)
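# Illustrative, not part of Alembic: a minimal stand-in for AutogenContext
# (real callers pass the genuine object) showing that a plain-string
# default has its outer SQL quotes stripped before being repr()'ed.
def _demo_render_server_default() -> None:
    class _FakeCtx:  # hypothetical stand-in, only what the demo touches
        opts = {"render_item": None}
    print(_render_server_default("'5'", _FakeCtx()))
    # prints '5' -- the repr of the unquoted text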
def _render_computed(
computed: Computed, autogen_context: AutogenContext
) -> str:
text = _render_potential_expr(
computed.sqltext, autogen_context, wrap_in_element=False
)
kwargs = {}
if computed.persisted is not None:
kwargs["persisted"] = computed.persisted
return "%(prefix)sComputed(%(text)s, %(kwargs)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"text": text,
"kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())),
}
def _render_identity(
identity: Identity, autogen_context: AutogenContext
) -> str:
kwargs = sqla_compat._get_identity_options_dict(
identity, dialect_kwargs=True
)
return "%(prefix)sIdentity(%(kwargs)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())),
}
def _repr_type(
type_: TypeEngine,
autogen_context: AutogenContext,
_skip_variants: bool = False,
) -> str:
rendered = _user_defined_render("type", type_, autogen_context)
if rendered is not False:
return rendered
if hasattr(autogen_context.migration_context, "impl"):
impl_rt = autogen_context.migration_context.impl.render_type(
type_, autogen_context
)
else:
impl_rt = None
mod = type(type_).__module__
imports = autogen_context.imports
if not _skip_variants and sqla_compat._type_has_variants(type_):
return _render_Variant_type(type_, autogen_context)
elif mod.startswith("sqlalchemy.dialects"):
match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod)
assert match is not None
dname = match.group(1)
if imports is not None:
imports.add("from sqlalchemy.dialects import %s" % dname)
if impl_rt:
return impl_rt
else:
return "%s.%r" % (dname, type_)
elif impl_rt:
return impl_rt
elif mod.startswith("sqlalchemy."):
if "_render_%s_type" % type_.__visit_name__ in globals():
fn = globals()["_render_%s_type" % type_.__visit_name__]
return fn(type_, autogen_context)
else:
prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
return "%s%r" % (prefix, type_)
else:
prefix = _user_autogenerate_prefix(autogen_context, type_)
return "%s%r" % (prefix, type_)
def _render_ARRAY_type(type_: ARRAY, autogen_context: AutogenContext) -> str:
return cast(
str,
_render_type_w_subtype(
type_, autogen_context, "item_type", r"(.+?\()"
),
)
def _render_Variant_type(
type_: TypeEngine, autogen_context: AutogenContext
) -> str:
base_type, variant_mapping = sqla_compat._get_variant_mapping(type_)
base = _repr_type(base_type, autogen_context, _skip_variants=True)
assert base is not None and base is not False # type: ignore[comparison-overlap] # noqa:E501
for dialect in sorted(variant_mapping):
typ = variant_mapping[dialect]
base += ".with_variant(%s, %r)" % (
_repr_type(typ, autogen_context, _skip_variants=True),
dialect,
)
return base
def _render_type_w_subtype(
type_: TypeEngine,
autogen_context: AutogenContext,
attrname: str,
regexp: str,
prefix: Optional[str] = None,
) -> Union[Optional[str], Literal[False]]:
outer_repr = repr(type_)
inner_type = getattr(type_, attrname, None)
if inner_type is None:
return False
inner_repr = repr(inner_type)
inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr)
sub_type = _repr_type(getattr(type_, attrname), autogen_context)
outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr)
if prefix:
return "%s%s" % (prefix, outer_type)
mod = type(type_).__module__
if mod.startswith("sqlalchemy.dialects"):
match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod)
assert match is not None
dname = match.group(1)
return "%s.%s" % (dname, outer_type)
elif mod.startswith("sqlalchemy"):
prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
return "%s%s" % (prefix, outer_type)
else:
return None
_constraint_renderers = util.Dispatcher()
def _render_constraint(
constraint: Constraint,
autogen_context: AutogenContext,
namespace_metadata: Optional[MetaData],
) -> Optional[str]:
try:
renderer = _constraint_renderers.dispatch(constraint)
except ValueError:
util.warn("No renderer is established for object %r" % constraint)
return "[Unknown Python object %r]" % constraint
else:
return renderer(constraint, autogen_context, namespace_metadata)
@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint)
def _render_primary_key(
constraint: PrimaryKeyConstraint,
autogen_context: AutogenContext,
namespace_metadata: Optional[MetaData],
) -> Optional[str]:
rendered = _user_defined_render("primary_key", constraint, autogen_context)
if rendered is not False:
return rendered
if not constraint.columns:
return None
opts = []
if constraint.name:
opts.append(
("name", repr(_render_gen_name(autogen_context, constraint.name)))
)
return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"args": ", ".join(
[repr(c.name) for c in constraint.columns]
+ ["%s=%s" % (kwname, val) for kwname, val in opts]
),
}
def _fk_colspec(
fk: ForeignKey,
metadata_schema: Optional[str],
namespace_metadata: MetaData,
) -> str:
"""Implement a 'safe' version of ForeignKey._get_colspec() that
won't fail if the remote table can't be resolved.
"""
colspec = fk._get_colspec()
tokens = colspec.split(".")
tname, colname = tokens[-2:]
if metadata_schema is not None and len(tokens) == 2:
table_fullname = "%s.%s" % (metadata_schema, tname)
else:
table_fullname = ".".join(tokens[0:-1])
if (
not fk.link_to_name
and fk.parent is not None
and fk.parent.table is not None
):
# try to resolve the remote table in order to adjust for column.key.
# the FK constraint needs to be rendered in terms of the column
# name.
if table_fullname in namespace_metadata.tables:
col = namespace_metadata.tables[table_fullname].c.get(colname)
if col is not None:
colname = _ident(col.name) # type: ignore[assignment]
colspec = "%s.%s" % (table_fullname, colname)
return colspec
def _populate_render_fk_opts(
constraint: ForeignKeyConstraint, opts: List[Tuple[str, str]]
) -> None:
if constraint.onupdate:
opts.append(("onupdate", repr(constraint.onupdate)))
if constraint.ondelete:
opts.append(("ondelete", repr(constraint.ondelete)))
if constraint.initially:
opts.append(("initially", repr(constraint.initially)))
if constraint.deferrable:
opts.append(("deferrable", repr(constraint.deferrable)))
if constraint.use_alter:
opts.append(("use_alter", repr(constraint.use_alter)))
if constraint.match:
opts.append(("match", repr(constraint.match)))
@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint)
def _render_foreign_key(
constraint: ForeignKeyConstraint,
autogen_context: AutogenContext,
namespace_metadata: MetaData,
) -> Optional[str]:
rendered = _user_defined_render("foreign_key", constraint, autogen_context)
if rendered is not False:
return rendered
opts = []
if constraint.name:
opts.append(
("name", repr(_render_gen_name(autogen_context, constraint.name)))
)
_populate_render_fk_opts(constraint, opts)
apply_metadata_schema = namespace_metadata.schema
return (
"%(prefix)sForeignKeyConstraint([%(cols)s], "
"[%(refcols)s], %(args)s)"
% {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"cols": ", ".join(
repr(_ident(f.parent.name)) for f in constraint.elements
),
"refcols": ", ".join(
repr(_fk_colspec(f, apply_metadata_schema, namespace_metadata))
for f in constraint.elements
),
"args": ", ".join(
["%s=%s" % (kwname, val) for kwname, val in opts]
),
}
)
@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint)
def _render_unique_constraint(
constraint: UniqueConstraint,
autogen_context: AutogenContext,
namespace_metadata: Optional[MetaData],
) -> str:
rendered = _user_defined_render("unique", constraint, autogen_context)
if rendered is not False:
return rendered
return _uq_constraint(constraint, autogen_context, False)
@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint)
def _render_check_constraint(
constraint: CheckConstraint,
autogen_context: AutogenContext,
namespace_metadata: Optional[MetaData],
) -> Optional[str]:
rendered = _user_defined_render("check", constraint, autogen_context)
if rendered is not False:
return rendered
# detect the constraint being part of
# a parent type which is probably in the Table already.
# ideally SQLAlchemy would give us more of a first class
# way to detect this.
if (
constraint._create_rule
and hasattr(constraint._create_rule, "target")
and isinstance(
constraint._create_rule.target,
sqltypes.TypeEngine,
)
):
return None
opts = []
if constraint.name:
opts.append(
("name", repr(_render_gen_name(autogen_context, constraint.name)))
)
return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"opts": (
", " + (", ".join("%s=%s" % (k, v) for k, v in opts))
if opts
else ""
),
"sqltext": _render_potential_expr(
constraint.sqltext, autogen_context, wrap_in_element=False
),
}
@renderers.dispatch_for(ops.ExecuteSQLOp)
def _execute_sql(autogen_context: AutogenContext, op: ops.ExecuteSQLOp) -> str:
if not isinstance(op.sqltext, str):
raise NotImplementedError(
"Autogenerate rendering of SQL Expression language constructs "
"not supported here; please use a plain SQL string"
)
return "op.execute(%r)" % op.sqltext
renderers = default_renderers.branch()
|
from __future__ import annotations
from typing import Any
from typing import Callable
from typing import Iterator
from typing import List
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
from .. import util
from ..operations import ops
if TYPE_CHECKING:
from ..operations.ops import AddColumnOp
from ..operations.ops import AlterColumnOp
from ..operations.ops import CreateTableOp
from ..operations.ops import DowngradeOps
from ..operations.ops import MigrateOperation
from ..operations.ops import MigrationScript
from ..operations.ops import ModifyTableOps
from ..operations.ops import OpContainer
from ..operations.ops import UpgradeOps
from ..runtime.migration import MigrationContext
from ..script.revision import _GetRevArg
ProcessRevisionDirectiveFn = Callable[
["MigrationContext", "_GetRevArg", List["MigrationScript"]], None
]
class Rewriter:
"""A helper object that allows easy 'rewriting' of ops streams.
The :class:`.Rewriter` object is intended to be passed along
to the
:paramref:`.EnvironmentContext.configure.process_revision_directives`
parameter in an ``env.py`` script. Once constructed, any number
of "rewrites" functions can be associated with it, which will be given
the opportunity to modify the structure without needing explicit
knowledge of the overall structure.
The function is passed the :class:`.MigrationContext` object and
``revision`` tuple that are passed to the
:paramref:`.EnvironmentContext.configure.process_revision_directives`
function normally,
and the third argument is an individual directive of the type
noted in the decorator. The function has the choice of returning
a single op directive, which normally can be the directive that
was actually passed, or a new directive to replace it, or a list
of zero or more directives to replace it.
.. seealso::
:ref:`autogen_rewriter` - usage example
"""
_traverse = util.Dispatcher()
_chained: Tuple[Union[ProcessRevisionDirectiveFn, Rewriter], ...] = ()
def __init__(self) -> None:
self.dispatch = util.Dispatcher()
def chain(
self,
other: Union[
ProcessRevisionDirectiveFn,
Rewriter,
],
) -> Rewriter:
"""Produce a "chain" of this :class:`.Rewriter` to another.
This allows two or more rewriters to operate serially on a stream,
e.g.::
writer1 = autogenerate.Rewriter()
writer2 = autogenerate.Rewriter()
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
@writer2.rewrites(ops.AddColumnOp)
def add_column_idx(context, revision, op):
idx_op = ops.CreateIndexOp(
"ixc", op.table_name, [op.column.name]
)
return [op, idx_op]
writer = writer1.chain(writer2)
:param other: a :class:`.Rewriter` instance
:return: a new :class:`.Rewriter` that will run the operations
of this writer, then the "other" writer, in succession.
"""
wr = self.__class__.__new__(self.__class__)
wr.__dict__.update(self.__dict__)
wr._chained += (other,)
return wr
def rewrites(
self,
operator: Union[
Type[AddColumnOp],
Type[MigrateOperation],
Type[AlterColumnOp],
Type[CreateTableOp],
Type[ModifyTableOps],
],
) -> Callable[..., Any]:
"""Register a function as rewriter for a given type.
The function should receive three arguments, which are
the :class:`.MigrationContext`, a ``revision`` tuple, and
an op directive of the type indicated. E.g.::
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
op.column.nullable = True
return op
"""
return self.dispatch.dispatch_for(operator)
def _rewrite(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrateOperation,
) -> Iterator[MigrateOperation]:
try:
_rewriter = self.dispatch.dispatch(directive)
except ValueError:
_rewriter = None
yield directive
else:
if self in directive._mutations:
yield directive
else:
for r_directive in util.to_list(
_rewriter(context, revision, directive), []
):
r_directive._mutations = r_directive._mutations.union(
[self]
)
yield r_directive
def __call__(
self,
context: MigrationContext,
revision: _GetRevArg,
directives: List[MigrationScript],
) -> None:
self.process_revision_directives(context, revision, directives)
for process_revision_directives in self._chained:
process_revision_directives(context, revision, directives)
@_traverse.dispatch_for(ops.MigrationScript)
def _traverse_script(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrationScript,
) -> None:
upgrade_ops_list: List[UpgradeOps] = []
for upgrade_ops in directive.upgrade_ops_list:
ret = self._traverse_for(context, revision, upgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for UpgradeOps traverse"
)
upgrade_ops_list.append(ret[0])
directive.upgrade_ops = upgrade_ops_list # type: ignore
downgrade_ops_list: List[DowngradeOps] = []
for downgrade_ops in directive.downgrade_ops_list:
ret = self._traverse_for(context, revision, downgrade_ops)
if len(ret) != 1:
raise ValueError(
"Can only return single object for DowngradeOps traverse"
)
downgrade_ops_list.append(ret[0])
directive.downgrade_ops = downgrade_ops_list # type: ignore
@_traverse.dispatch_for(ops.OpContainer)
def _traverse_op_container(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: OpContainer,
) -> None:
self._traverse_list(context, revision, directive.ops)
@_traverse.dispatch_for(ops.MigrateOperation)
def _traverse_any_directive(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrateOperation,
) -> None:
pass
def _traverse_for(
self,
context: MigrationContext,
revision: _GetRevArg,
directive: MigrateOperation,
) -> Any:
directives = list(self._rewrite(context, revision, directive))
for directive in directives:
traverser = self._traverse.dispatch(directive)
traverser(self, context, revision, directive)
return directives
def _traverse_list(
self,
context: MigrationContext,
revision: _GetRevArg,
directives: Any,
) -> None:
dest = []
for directive in directives:
dest.extend(self._traverse_for(context, revision, directive))
directives[:] = dest
def process_revision_directives(
self,
context: MigrationContext,
revision: _GetRevArg,
directives: List[MigrationScript],
) -> None:
self._traverse_list(context, revision, directives)
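# Illustrative env.py wiring for the class above (handler name assumed):
#
#     from alembic.autogenerate import rewriter
#     from alembic.operations import ops
#
#     writer = rewriter.Rewriter()
#
#     @writer.rewrites(ops.AddColumnOp)
#     def force_nullable(context, revision, op):
#         op.column.nullable = True
#         return op
#
#     context.configure(
#         # ... engine/metadata options ...
#         process_revision_directives=writer,
#     )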
|
from .api import _render_migration_diffs as _render_migration_diffs
from .api import compare_metadata as compare_metadata
from .api import produce_migrations as produce_migrations
from .api import render_python_code as render_python_code
from .api import RevisionContext as RevisionContext
from .compare import _produce_net_changes as _produce_net_changes
from .compare import comparators as comparators
from .render import render_op_text as render_op_text
from .render import renderers as renderers
from .rewriter import Rewriter as Rewriter
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import functools
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import exc
from sqlalchemy import Integer
from sqlalchemy import types as sqltypes
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import Column
from sqlalchemy.schema import DDLElement
from sqlalchemy.sql.elements import quoted_name
from ..util.sqla_compat import _columns_for_constraint # noqa
from ..util.sqla_compat import _find_columns # noqa
from ..util.sqla_compat import _fk_spec # noqa
from ..util.sqla_compat import _is_type_bound # noqa
from ..util.sqla_compat import _table_for_constraint # noqa
if TYPE_CHECKING:
from typing import Any
from sqlalchemy import Computed
from sqlalchemy import Identity
from sqlalchemy.sql.compiler import Compiled
from sqlalchemy.sql.compiler import DDLCompiler
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.functions import Function
from sqlalchemy.sql.schema import FetchedValue
from sqlalchemy.sql.type_api import TypeEngine
from .impl import DefaultImpl
_ServerDefault = Union["TextClause", "FetchedValue", "Function[Any]", str]
class AlterTable(DDLElement):
"""Represent an ALTER TABLE statement.
Only the string name and optional schema name of the table
is required, not a full Table object.
"""
def __init__(
self,
table_name: str,
schema: Optional[Union[quoted_name, str]] = None,
) -> None:
self.table_name = table_name
self.schema = schema
class RenameTable(AlterTable):
def __init__(
self,
old_table_name: str,
new_table_name: Union[quoted_name, str],
schema: Optional[Union[quoted_name, str]] = None,
) -> None:
super().__init__(old_table_name, schema=schema)
self.new_table_name = new_table_name
class AlterColumn(AlterTable):
def __init__(
self,
name: str,
column_name: str,
schema: Optional[str] = None,
existing_type: Optional[TypeEngine] = None,
existing_nullable: Optional[bool] = None,
existing_server_default: Optional[_ServerDefault] = None,
existing_comment: Optional[str] = None,
) -> None:
super().__init__(name, schema=schema)
self.column_name = column_name
self.existing_type = (
sqltypes.to_instance(existing_type)
if existing_type is not None
else None
)
self.existing_nullable = existing_nullable
self.existing_server_default = existing_server_default
self.existing_comment = existing_comment
class ColumnNullable(AlterColumn):
def __init__(
self, name: str, column_name: str, nullable: bool, **kw
) -> None:
super().__init__(name, column_name, **kw)
self.nullable = nullable
class ColumnType(AlterColumn):
def __init__(
self, name: str, column_name: str, type_: TypeEngine, **kw
) -> None:
super().__init__(name, column_name, **kw)
self.type_ = sqltypes.to_instance(type_)
class ColumnName(AlterColumn):
def __init__(
self, name: str, column_name: str, newname: str, **kw
) -> None:
super().__init__(name, column_name, **kw)
self.newname = newname
class ColumnDefault(AlterColumn):
def __init__(
self,
name: str,
column_name: str,
default: Optional[_ServerDefault],
**kw,
) -> None:
super().__init__(name, column_name, **kw)
self.default = default
class ComputedColumnDefault(AlterColumn):
def __init__(
self, name: str, column_name: str, default: Optional[Computed], **kw
) -> None:
super().__init__(name, column_name, **kw)
self.default = default
class IdentityColumnDefault(AlterColumn):
def __init__(
self,
name: str,
column_name: str,
default: Optional[Identity],
impl: DefaultImpl,
**kw,
) -> None:
super().__init__(name, column_name, **kw)
self.default = default
self.impl = impl
class AddColumn(AlterTable):
def __init__(
self,
name: str,
column: Column[Any],
schema: Optional[Union[quoted_name, str]] = None,
) -> None:
super().__init__(name, schema=schema)
self.column = column
class DropColumn(AlterTable):
def __init__(
self, name: str, column: Column[Any], schema: Optional[str] = None
) -> None:
super().__init__(name, schema=schema)
self.column = column
class ColumnComment(AlterColumn):
def __init__(
self, name: str, column_name: str, comment: Optional[str], **kw
) -> None:
super().__init__(name, column_name, **kw)
self.comment = comment
@compiles(RenameTable)
def visit_rename_table(
element: RenameTable, compiler: DDLCompiler, **kw
) -> str:
return "%s RENAME TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_table_name(compiler, element.new_table_name, element.schema),
)
@compiles(AddColumn)
def visit_add_column(element: AddColumn, compiler: DDLCompiler, **kw) -> str:
return "%s %s" % (
alter_table(compiler, element.table_name, element.schema),
add_column(compiler, element.column, **kw),
)
@compiles(DropColumn)
def visit_drop_column(element: DropColumn, compiler: DDLCompiler, **kw) -> str:
return "%s %s" % (
alter_table(compiler, element.table_name, element.schema),
drop_column(compiler, element.column.name, **kw),
)
@compiles(ColumnNullable)
def visit_column_nullable(
element: ColumnNullable, compiler: DDLCompiler, **kw
) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
"DROP NOT NULL" if element.nullable else "SET NOT NULL",
)
@compiles(ColumnType)
def visit_column_type(element: ColumnType, compiler: DDLCompiler, **kw) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
"TYPE %s" % format_type(compiler, element.type_),
)
@compiles(ColumnName)
def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str:
return "%s RENAME %s TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
)
@compiles(ColumnDefault)
def visit_column_default(
element: ColumnDefault, compiler: DDLCompiler, **kw
) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
(
"SET DEFAULT %s" % format_server_default(compiler, element.default)
if element.default is not None
else "DROP DEFAULT"
),
)
@compiles(ComputedColumnDefault)
def visit_computed_column(
element: ComputedColumnDefault, compiler: DDLCompiler, **kw
):
raise exc.CompileError(
'Adding or removing a "computed" construct, e.g. GENERATED '
"ALWAYS AS, to or from an existing column is not supported."
)
@compiles(IdentityColumnDefault)
def visit_identity_column(
element: IdentityColumnDefault, compiler: DDLCompiler, **kw
):
raise exc.CompileError(
'Adding, removing or modifying an "identity" construct, '
"e.g. GENERATED AS IDENTITY, to or from an existing "
"column is not supported in this dialect."
)
def quote_dotted(
name: Union[quoted_name, str], quote: functools.partial
) -> Union[quoted_name, str]:
"""quote the elements of a dotted name"""
if isinstance(name, quoted_name):
return quote(name)
result = ".".join([quote(x) for x in name.split(".")])
return result
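# Illustrative, not part of Alembic: each dotted segment is quoted
# independently unless the whole name arrived as a quoted_name.
def _demo_quote_dotted() -> None:
    from sqlalchemy.dialects import sqlite
    preparer = sqlite.dialect().identifier_preparer
    quote = functools.partial(preparer.quote)
    print(quote_dotted("my schema.my table", quote))
    # "my schema"."my table"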
def format_table_name(
compiler: Compiled,
name: Union[quoted_name, str],
schema: Optional[Union[quoted_name, str]],
) -> Union[quoted_name, str]:
quote = functools.partial(compiler.preparer.quote)
if schema:
return quote_dotted(schema, quote) + "." + quote(name)
else:
return quote(name)
def format_column_name(
compiler: DDLCompiler, name: Optional[Union[quoted_name, str]]
) -> Union[quoted_name, str]:
return compiler.preparer.quote(name) # type: ignore[arg-type]
def format_server_default(
compiler: DDLCompiler,
default: Optional[_ServerDefault],
) -> str:
# this can be updated to use compiler.render_default_string
# for SQLAlchemy 2.0 and above; not in 1.4
default_str = compiler.get_column_default_string(
Column("x", Integer, server_default=default)
)
assert default_str is not None
return default_str
def format_type(compiler: DDLCompiler, type_: TypeEngine) -> str:
return compiler.dialect.type_compiler.process(type_)
def alter_table(
compiler: DDLCompiler,
name: str,
schema: Optional[str],
) -> str:
return "ALTER TABLE %s" % format_table_name(compiler, name, schema)
def drop_column(compiler: DDLCompiler, name: str, **kw) -> str:
return "DROP COLUMN %s" % format_column_name(compiler, name)
def alter_column(compiler: DDLCompiler, name: str) -> str:
return "ALTER COLUMN %s" % format_column_name(compiler, name)
def add_column(compiler: DDLCompiler, column: Column[Any], **kw) -> str:
text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
const = " ".join(
compiler.process(constraint) for constraint in column.constraints
)
if const:
text += " " + const
return text
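# Illustrative, not part of Alembic: the DDLElement subclasses above
# compile to plain SQL strings through their @compiles hooks.
def _demo_compile_rename_table() -> None:
    from sqlalchemy.dialects import sqlite
    stmt = RenameTable("old_t", "new_t")
    print(stmt.compile(dialect=sqlite.dialect()))
    # ALTER TABLE old_t RENAME TO new_t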
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import logging
import re
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import text
from . import _autogen
from . import base
from ._autogen import _constraint_sig as _constraint_sig
from ._autogen import ComparisonResult as ComparisonResult
from .. import util
from ..util import sqla_compat
if TYPE_CHECKING:
from typing import Literal
from typing import TextIO
from sqlalchemy.engine import Connection
from sqlalchemy.engine import Dialect
from sqlalchemy.engine.cursor import CursorResult
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql import ClauseElement
from sqlalchemy.sql import Executable
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import UniqueConstraint
from sqlalchemy.sql.selectable import TableClause
from sqlalchemy.sql.type_api import TypeEngine
from .base import _ServerDefault
from ..autogenerate.api import AutogenContext
from ..operations.batch import ApplyBatchImpl
from ..operations.batch import BatchOperationsImpl
log = logging.getLogger(__name__)
class ImplMeta(type):
def __init__(
cls,
classname: str,
bases: Tuple[Type[DefaultImpl]],
dict_: Dict[str, Any],
):
newtype = type.__init__(cls, classname, bases, dict_)
if "__dialect__" in dict_:
_impls[dict_["__dialect__"]] = cls # type: ignore[assignment]
return newtype
_impls: Dict[str, Type[DefaultImpl]] = {}
class DefaultImpl(metaclass=ImplMeta):
"""Provide the entrypoint for major migration operations,
including database-specific behavioral variances.
While individual SQL/DDL constructs already provide
for database-specific implementations, variances here
allow for entirely different sequences of operations
to take place for a particular migration, such as
SQL Server's special 'IDENTITY INSERT' step for
bulk inserts.
"""
__dialect__ = "default"
transactional_ddl = False
command_terminator = ";"
type_synonyms: Tuple[Set[str], ...] = ({"NUMERIC", "DECIMAL"},)
type_arg_extract: Sequence[str] = ()
# These attributes are deprecated in SQLAlchemy via #10247. They need to
    # be ignored to support older versions that did not use dialect kwargs.
# They only apply to Oracle and are replaced by oracle_order,
# oracle_on_null
identity_attrs_ignore: Tuple[str, ...] = ("order", "on_null")
def __init__(
self,
dialect: Dialect,
connection: Optional[Connection],
as_sql: bool,
transactional_ddl: Optional[bool],
output_buffer: Optional[TextIO],
context_opts: Dict[str, Any],
) -> None:
self.dialect = dialect
self.connection = connection
self.as_sql = as_sql
self.literal_binds = context_opts.get("literal_binds", False)
self.output_buffer = output_buffer
self.memo: dict = {}
self.context_opts = context_opts
if transactional_ddl is not None:
self.transactional_ddl = transactional_ddl
if self.literal_binds:
if not self.as_sql:
raise util.CommandError(
"Can't use literal_binds setting without as_sql mode"
)
@classmethod
def get_by_dialect(cls, dialect: Dialect) -> Type[DefaultImpl]:
return _impls[dialect.name]
def static_output(self, text: str) -> None:
assert self.output_buffer is not None
self.output_buffer.write(text + "\n\n")
self.output_buffer.flush()
def version_table_impl(
self,
*,
version_table: str,
version_table_schema: Optional[str],
version_table_pk: bool,
**kw: Any,
) -> Table:
"""Generate a :class:`.Table` object which will be used as the
structure for the Alembic version table.
Third party dialects may override this hook to provide an alternate
structure for this :class:`.Table`; requirements are only that it
        be named based on the ``version_table`` parameter and contain
at least a single string-holding column named ``version_num``.
.. versionadded:: 1.14
"""
vt = Table(
version_table,
MetaData(),
Column("version_num", String(32), nullable=False),
schema=version_table_schema,
)
if version_table_pk:
vt.append_constraint(
PrimaryKeyConstraint(
"version_num", name=f"{version_table}_pkc"
)
)
return vt
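    # Sketch of a third-party dialect override (hypothetical class, not
    # part of Alembic), per the hook documented above:
    #
    #     class MyDialectImpl(DefaultImpl):
    #         __dialect__ = "mydialect"
    #
    #         def version_table_impl(self, *, version_table,
    #                                version_table_schema,
    #                                version_table_pk, **kw):
    #             # must keep the table name and a 'version_num' column
    #             return super().version_table_impl(
    #                 version_table=version_table,
    #                 version_table_schema=version_table_schema,
    #                 version_table_pk=version_table_pk, **kw)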
def requires_recreate_in_batch(
self, batch_op: BatchOperationsImpl
) -> bool:
"""Return True if the given :class:`.BatchOperationsImpl`
would need the table to be recreated and copied in order to
proceed.
Normally, only returns True on SQLite when operations other
than add_column are present.
"""
return False
def prep_table_for_batch(
self, batch_impl: ApplyBatchImpl, table: Table
) -> None:
"""perform any operations needed on a table before a new
one is created to replace it in batch mode.
the PG dialect uses this to drop constraints on the table
before the new one uses those same names.
"""
@property
def bind(self) -> Optional[Connection]:
return self.connection
def _exec(
self,
construct: Union[Executable, str],
execution_options: Optional[Mapping[str, Any]] = None,
multiparams: Optional[Sequence[Mapping[str, Any]]] = None,
params: Mapping[str, Any] = util.immutabledict(),
) -> Optional[CursorResult]:
if isinstance(construct, str):
construct = text(construct)
if self.as_sql:
if multiparams is not None or params:
raise TypeError("SQL parameters not allowed with as_sql")
compile_kw: dict[str, Any]
if self.literal_binds and not isinstance(
construct, schema.DDLElement
):
compile_kw = dict(compile_kwargs={"literal_binds": True})
else:
compile_kw = {}
if TYPE_CHECKING:
assert isinstance(construct, ClauseElement)
compiled = construct.compile(dialect=self.dialect, **compile_kw)
self.static_output(
str(compiled).replace("\t", " ").strip()
+ self.command_terminator
)
return None
else:
conn = self.connection
assert conn is not None
if execution_options:
conn = conn.execution_options(**execution_options)
if params and multiparams is not None:
raise TypeError(
"Can't send params and multiparams at the same time"
)
if multiparams:
return conn.execute(construct, multiparams)
else:
return conn.execute(construct, params)
def execute(
self,
sql: Union[Executable, str],
execution_options: Optional[dict[str, Any]] = None,
) -> None:
self._exec(sql, execution_options)
def alter_column(
self,
table_name: str,
column_name: str,
nullable: Optional[bool] = None,
server_default: Union[_ServerDefault, Literal[False]] = False,
name: Optional[str] = None,
type_: Optional[TypeEngine] = None,
schema: Optional[str] = None,
autoincrement: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
existing_comment: Optional[str] = None,
existing_type: Optional[TypeEngine] = None,
existing_server_default: Optional[_ServerDefault] = None,
existing_nullable: Optional[bool] = None,
existing_autoincrement: Optional[bool] = None,
**kw: Any,
) -> None:
if autoincrement is not None or existing_autoincrement is not None:
util.warn(
"autoincrement and existing_autoincrement "
"only make sense for MySQL",
stacklevel=3,
)
if nullable is not None:
self._exec(
base.ColumnNullable(
table_name,
column_name,
nullable,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
)
)
if server_default is not False:
kw = {}
cls_: Type[
Union[
base.ComputedColumnDefault,
base.IdentityColumnDefault,
base.ColumnDefault,
]
]
if sqla_compat._server_default_is_computed(
server_default, existing_server_default
):
cls_ = base.ComputedColumnDefault
elif sqla_compat._server_default_is_identity(
server_default, existing_server_default
):
cls_ = base.IdentityColumnDefault
kw["impl"] = self
else:
cls_ = base.ColumnDefault
self._exec(
cls_(
table_name,
column_name,
server_default, # type:ignore[arg-type]
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
**kw,
)
)
if type_ is not None:
self._exec(
base.ColumnType(
table_name,
column_name,
type_,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
)
)
if comment is not False:
self._exec(
base.ColumnComment(
table_name,
column_name,
comment,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
)
)
# do the new name last ;)
if name is not None:
self._exec(
base.ColumnName(
table_name,
column_name,
name,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
)
)
def add_column(
self,
table_name: str,
column: Column[Any],
schema: Optional[Union[str, quoted_name]] = None,
) -> None:
self._exec(base.AddColumn(table_name, column, schema=schema))
def drop_column(
self,
table_name: str,
column: Column[Any],
schema: Optional[str] = None,
**kw,
) -> None:
self._exec(base.DropColumn(table_name, column, schema=schema))
def add_constraint(self, const: Any) -> None:
if const._create_rule is None or const._create_rule(self):
self._exec(schema.AddConstraint(const))
def drop_constraint(self, const: Constraint) -> None:
self._exec(schema.DropConstraint(const))
def rename_table(
self,
old_table_name: str,
new_table_name: Union[str, quoted_name],
schema: Optional[Union[str, quoted_name]] = None,
) -> None:
self._exec(
base.RenameTable(old_table_name, new_table_name, schema=schema)
)
def create_table(self, table: Table, **kw: Any) -> None:
table.dispatch.before_create(
table, self.connection, checkfirst=False, _ddl_runner=self
)
self._exec(schema.CreateTable(table, **kw))
table.dispatch.after_create(
table, self.connection, checkfirst=False, _ddl_runner=self
)
for index in table.indexes:
self._exec(schema.CreateIndex(index))
with_comment = (
self.dialect.supports_comments and not self.dialect.inline_comments
)
comment = table.comment
if comment and with_comment:
self.create_table_comment(table)
for column in table.columns:
comment = column.comment
if comment and with_comment:
self.create_column_comment(column)
def drop_table(self, table: Table, **kw: Any) -> None:
table.dispatch.before_drop(
table, self.connection, checkfirst=False, _ddl_runner=self
)
self._exec(schema.DropTable(table, **kw))
table.dispatch.after_drop(
table, self.connection, checkfirst=False, _ddl_runner=self
)
def create_index(self, index: Index, **kw: Any) -> None:
self._exec(schema.CreateIndex(index, **kw))
def create_table_comment(self, table: Table) -> None:
self._exec(schema.SetTableComment(table))
def drop_table_comment(self, table: Table) -> None:
self._exec(schema.DropTableComment(table))
def create_column_comment(self, column: Column[Any]) -> None:
self._exec(schema.SetColumnComment(column))
def drop_index(self, index: Index, **kw: Any) -> None:
self._exec(schema.DropIndex(index, **kw))
def bulk_insert(
self,
table: Union[TableClause, Table],
rows: List[dict],
multiinsert: bool = True,
) -> None:
if not isinstance(rows, list):
raise TypeError("List expected")
elif rows and not isinstance(rows[0], dict):
raise TypeError("List of dictionaries expected")
if self.as_sql:
for row in rows:
self._exec(
table.insert()
.inline()
.values(
**{
k: (
sqla_compat._literal_bindparam(
k, v, type_=table.c[k].type
)
if not isinstance(
v, sqla_compat._literal_bindparam
)
else v
)
for k, v in row.items()
}
)
)
else:
if rows:
if multiinsert:
self._exec(table.insert().inline(), multiparams=rows)
else:
for row in rows:
self._exec(table.insert().inline().values(**row))
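    # Usage sketch (hypothetical table/rows): in a migration script,
    #   op.bulk_insert(accounts_table, [{"id": 1, "name": "org1"}])
    # lands here. Offline mode renders one INSERT per row with literal
    # binds; online mode passes the row list as multiparams (an
    # executemany-style call) when multiinsert=True.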
def _tokenize_column_type(self, column: Column) -> Params:
definition: str
definition = self.dialect.type_compiler.process(column.type).lower()
# tokenize the SQLAlchemy-generated version of a type, so that
# the two can be compared.
#
# examples:
# NUMERIC(10, 5)
# TIMESTAMP WITH TIMEZONE
# INTEGER UNSIGNED
# INTEGER (10) UNSIGNED
# INTEGER(10) UNSIGNED
# varchar character set utf8
#
tokens: List[str] = re.findall(r"[\w\-_]+|\(.+?\)", definition)
term_tokens: List[str] = []
paren_term = None
for token in tokens:
if re.match(r"^\(.*\)$", token):
paren_term = token
else:
term_tokens.append(token)
params = Params(term_tokens[0], term_tokens[1:], [], {})
if paren_term:
term: str
for term in re.findall("[^(),]+", paren_term):
if "=" in term:
key, val = term.split("=")
params.kwargs[key.strip()] = val.strip()
else:
params.args.append(term.strip())
return params
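    # Illustrative sketch (assumed dialect output): a definition such as
    # "numeric(10, 5)" would tokenize above to roughly
    #   Params(token0="numeric", tokens=[], args=["10", "5"], kwargs={})
    # while "integer(10) unsigned" would yield
    #   Params(token0="integer", tokens=["unsigned"], args=["10"], kwargs={})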
def _column_types_match(
self, inspector_params: Params, metadata_params: Params
) -> bool:
if inspector_params.token0 == metadata_params.token0:
return True
synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms]
inspector_all_terms = " ".join(
[inspector_params.token0] + inspector_params.tokens
)
metadata_all_terms = " ".join(
[metadata_params.token0] + metadata_params.tokens
)
for batch in synonyms:
if {inspector_all_terms, metadata_all_terms}.issubset(batch) or {
inspector_params.token0,
metadata_params.token0,
}.issubset(batch):
return True
return False
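    # Hedged example: with {"decimal", "numeric"} present in type_synonyms,
    # an inspected "NUMERIC" and a metadata "DECIMAL" fall into the same
    # synonym batch above, so no type difference is reported for them.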
def _column_args_match(
self, inspected_params: Params, meta_params: Params
) -> bool:
"""We want to compare column parameters. However, we only want
to compare parameters that are set. If they both have `collation`,
we want to make sure they are the same. However, if only one
specifies it, dont flag it for being less specific
"""
if (
len(meta_params.tokens) == len(inspected_params.tokens)
and meta_params.tokens != inspected_params.tokens
):
return False
if (
len(meta_params.args) == len(inspected_params.args)
and meta_params.args != inspected_params.args
):
return False
insp = " ".join(inspected_params.tokens).lower()
meta = " ".join(meta_params.tokens).lower()
for reg in self.type_arg_extract:
mi = re.search(reg, insp)
mm = re.search(reg, meta)
if mi and mm and mi.group(1) != mm.group(1):
return False
return True
def compare_type(
self, inspector_column: Column[Any], metadata_column: Column
) -> bool:
"""Returns True if there ARE differences between the types of the two
columns. Takes impl.type_synonyms into account between retrospected
and metadata types
"""
inspector_params = self._tokenize_column_type(inspector_column)
metadata_params = self._tokenize_column_type(metadata_column)
if not self._column_types_match(inspector_params, metadata_params):
return True
if not self._column_args_match(inspector_params, metadata_params):
return True
return False
def compare_server_default(
self,
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default,
):
return rendered_inspector_default != rendered_metadata_default
def correct_for_autogen_constraints(
self,
conn_uniques: Set[UniqueConstraint],
conn_indexes: Set[Index],
metadata_unique_constraints: Set[UniqueConstraint],
metadata_indexes: Set[Index],
) -> None:
pass
def cast_for_batch_migrate(self, existing, existing_transfer, new_type):
if existing.type._type_affinity is not new_type._type_affinity:
existing_transfer["expr"] = cast(
existing_transfer["expr"], new_type
)
def render_ddl_sql_expr(
self, expr: ClauseElement, is_server_default: bool = False, **kw: Any
) -> str:
"""Render a SQL expression that is typically a server default,
index expression, etc.
"""
compile_kw = {"literal_binds": True, "include_table": False}
return str(
expr.compile(dialect=self.dialect, compile_kwargs=compile_kw)
)
def _compat_autogen_column_reflect(self, inspector: Inspector) -> Callable:
return self.autogen_column_reflect
def correct_for_autogen_foreignkeys(
self,
conn_fks: Set[ForeignKeyConstraint],
metadata_fks: Set[ForeignKeyConstraint],
) -> None:
pass
def autogen_column_reflect(self, inspector, table, column_info):
"""A hook that is attached to the 'column_reflect' event for when
a Table is reflected from the database during the autogenerate
process.
Dialects can elect to modify the information gathered here.
"""
def start_migrations(self) -> None:
"""A hook called when :meth:`.EnvironmentContext.run_migrations`
is called.
Implementations can set up per-migration-run state here.
"""
def emit_begin(self) -> None:
"""Emit the string ``BEGIN``, or the backend-specific
equivalent, on the current connection context.
This is used in offline mode and typically
via :meth:`.EnvironmentContext.begin_transaction`.
"""
self.static_output("BEGIN" + self.command_terminator)
def emit_commit(self) -> None:
"""Emit the string ``COMMIT``, or the backend-specific
equivalent, on the current connection context.
This is used in offline mode and typically
via :meth:`.EnvironmentContext.begin_transaction`.
"""
self.static_output("COMMIT" + self.command_terminator)
def render_type(
self, type_obj: TypeEngine, autogen_context: AutogenContext
) -> Union[str, Literal[False]]:
return False
def _compare_identity_default(self, metadata_identity, inspector_identity):
        # "ignored" contains the attributes that were not considered
        # because they are assumed to be at their default values in the db.
diff, ignored = _compare_identity_options(
metadata_identity,
inspector_identity,
schema.Identity(),
skip={"always"},
)
meta_always = getattr(metadata_identity, "always", None)
inspector_always = getattr(inspector_identity, "always", None)
# None and False are the same in this comparison
if bool(meta_always) != bool(inspector_always):
diff.add("always")
diff.difference_update(self.identity_attrs_ignore)
# returns 3 values:
return (
# different identity attributes
diff,
# ignored identity attributes
ignored,
            # whether the two identities should be considered different
bool(diff) or bool(metadata_identity) != bool(inspector_identity),
)
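    # Hedged example: metadata Identity(always=True) vs. a reflected identity
    # with "always" unset differs under the bool() comparison above, so
    # "always" lands in diff and the third returned element is True.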
def _compare_index_unique(
self, metadata_index: Index, reflected_index: Index
) -> Optional[str]:
conn_unique = bool(reflected_index.unique)
meta_unique = bool(metadata_index.unique)
if conn_unique != meta_unique:
return f"unique={conn_unique} to unique={meta_unique}"
else:
return None
def _create_metadata_constraint_sig(
self, constraint: _autogen._C, **opts: Any
) -> _constraint_sig[_autogen._C]:
return _constraint_sig.from_constraint(True, self, constraint, **opts)
def _create_reflected_constraint_sig(
self, constraint: _autogen._C, **opts: Any
) -> _constraint_sig[_autogen._C]:
return _constraint_sig.from_constraint(False, self, constraint, **opts)
def compare_indexes(
self,
metadata_index: Index,
reflected_index: Index,
) -> ComparisonResult:
"""Compare two indexes by comparing the signature generated by
``create_index_sig``.
This method returns a ``ComparisonResult``.
"""
msg: List[str] = []
unique_msg = self._compare_index_unique(
metadata_index, reflected_index
)
if unique_msg:
msg.append(unique_msg)
m_sig = self._create_metadata_constraint_sig(metadata_index)
r_sig = self._create_reflected_constraint_sig(reflected_index)
assert _autogen.is_index_sig(m_sig)
assert _autogen.is_index_sig(r_sig)
        # The assumption is that the indexes have no expressions
for sig in m_sig, r_sig:
if sig.has_expressions:
log.warning(
"Generating approximate signature for index %s. "
"The dialect "
"implementation should either skip expression indexes "
"or provide a custom implementation.",
sig.const,
)
if m_sig.column_names != r_sig.column_names:
msg.append(
f"expression {r_sig.column_names} to {m_sig.column_names}"
)
if msg:
return ComparisonResult.Different(msg)
else:
return ComparisonResult.Equal()
def compare_unique_constraint(
self,
metadata_constraint: UniqueConstraint,
reflected_constraint: UniqueConstraint,
) -> ComparisonResult:
"""Compare two unique constraints by comparing the two signatures.
        The signatures are generated from each constraint via
        ``_create_metadata_constraint_sig`` and
        ``_create_reflected_constraint_sig``.
This method returns a ``ComparisonResult``.
"""
metadata_tup = self._create_metadata_constraint_sig(
metadata_constraint
)
reflected_tup = self._create_reflected_constraint_sig(
reflected_constraint
)
meta_sig = metadata_tup.unnamed
conn_sig = reflected_tup.unnamed
if conn_sig != meta_sig:
return ComparisonResult.Different(
f"expression {conn_sig} to {meta_sig}"
)
else:
return ComparisonResult.Equal()
def _skip_functional_indexes(self, metadata_indexes, conn_indexes):
conn_indexes_by_name = {c.name: c for c in conn_indexes}
for idx in list(metadata_indexes):
if idx.name in conn_indexes_by_name:
continue
iex = sqla_compat.is_expression_index(idx)
if iex:
util.warn(
"autogenerate skipping metadata-specified "
"expression-based index "
f"{idx.name!r}; dialect {self.__dialect__!r} under "
f"SQLAlchemy {sqla_compat.sqlalchemy_version} can't "
"reflect these indexes so they can't be compared"
)
metadata_indexes.discard(idx)
def adjust_reflected_dialect_options(
self, reflected_object: Dict[str, Any], kind: str
) -> Dict[str, Any]:
return reflected_object.get("dialect_options", {})
class Params(NamedTuple):
token0: str
tokens: List[str]
args: List[str]
kwargs: Dict[str, str]
def _compare_identity_options(
metadata_io: Union[schema.Identity, schema.Sequence, None],
inspector_io: Union[schema.Identity, schema.Sequence, None],
default_io: Union[schema.Identity, schema.Sequence],
skip: Set[str],
):
    # this can be used for identity or sequence compare.
    # default_io is an instance of IdentityOption with all attributes set
    # to their default values.
meta_d = sqla_compat._get_identity_options_dict(metadata_io)
insp_d = sqla_compat._get_identity_options_dict(inspector_io)
diff = set()
ignored_attr = set()
def check_dicts(
meta_dict: Mapping[str, Any],
insp_dict: Mapping[str, Any],
default_dict: Mapping[str, Any],
attrs: Iterable[str],
):
for attr in set(attrs).difference(skip):
meta_value = meta_dict.get(attr)
insp_value = insp_dict.get(attr)
if insp_value != meta_value:
default_value = default_dict.get(attr)
if meta_value == default_value:
ignored_attr.add(attr)
else:
diff.add(attr)
check_dicts(
meta_d,
insp_d,
sqla_compat._get_identity_options_dict(default_io),
set(meta_d).union(insp_d),
)
if sqla_compat.identity_has_dialect_kwargs:
assert hasattr(default_io, "dialect_kwargs")
# use only the dialect kwargs in inspector_io since metadata_io
# can have options for many backends
check_dicts(
getattr(metadata_io, "dialect_kwargs", {}),
getattr(inspector_io, "dialect_kwargs", {}),
default_io.dialect_kwargs,
getattr(inspector_io, "dialect_kwargs", {}),
)
return diff, ignored_attr
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import re
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import types as sqltypes
from sqlalchemy.schema import Column
from sqlalchemy.schema import CreateIndex
from sqlalchemy.sql.base import Executable
from sqlalchemy.sql.elements import ClauseElement
from .base import AddColumn
from .base import alter_column
from .base import alter_table
from .base import ColumnDefault
from .base import ColumnName
from .base import ColumnNullable
from .base import ColumnType
from .base import format_column_name
from .base import format_server_default
from .base import format_table_name
from .base import format_type
from .base import RenameTable
from .impl import DefaultImpl
from .. import util
from ..util import sqla_compat
from ..util.sqla_compat import compiles
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.dialects.mssql.base import MSDDLCompiler
from sqlalchemy.dialects.mssql.base import MSSQLCompiler
from sqlalchemy.engine.cursor import CursorResult
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.selectable import TableClause
from sqlalchemy.sql.type_api import TypeEngine
from .base import _ServerDefault
class MSSQLImpl(DefaultImpl):
__dialect__ = "mssql"
transactional_ddl = True
batch_separator = "GO"
type_synonyms = DefaultImpl.type_synonyms + ({"VARCHAR", "NVARCHAR"},)
identity_attrs_ignore = DefaultImpl.identity_attrs_ignore + (
"minvalue",
"maxvalue",
"nominvalue",
"nomaxvalue",
"cycle",
"cache",
)
def __init__(self, *arg, **kw) -> None:
super().__init__(*arg, **kw)
self.batch_separator = self.context_opts.get(
"mssql_batch_separator", self.batch_separator
)
def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]:
result = super()._exec(construct, *args, **kw)
if self.as_sql and self.batch_separator:
self.static_output(self.batch_separator)
return result
def emit_begin(self) -> None:
self.static_output("BEGIN TRANSACTION" + self.command_terminator)
def emit_commit(self) -> None:
super().emit_commit()
if self.as_sql and self.batch_separator:
self.static_output(self.batch_separator)
def alter_column( # type:ignore[override]
self,
table_name: str,
column_name: str,
nullable: Optional[bool] = None,
server_default: Optional[
Union[_ServerDefault, Literal[False]]
] = False,
name: Optional[str] = None,
type_: Optional[TypeEngine] = None,
schema: Optional[str] = None,
existing_type: Optional[TypeEngine] = None,
existing_server_default: Optional[_ServerDefault] = None,
existing_nullable: Optional[bool] = None,
**kw: Any,
) -> None:
if nullable is not None:
if type_ is not None:
# the NULL/NOT NULL alter will handle
# the type alteration
existing_type = type_
type_ = None
elif existing_type is None:
raise util.CommandError(
"MS-SQL ALTER COLUMN operations "
"with NULL or NOT NULL require the "
"existing_type or a new type_ be passed."
)
elif existing_nullable is not None and type_ is not None:
nullable = existing_nullable
# the NULL/NOT NULL alter will handle
# the type alteration
existing_type = type_
type_ = None
elif type_ is not None:
util.warn(
"MS-SQL ALTER COLUMN operations that specify type_= "
"should also specify a nullable= or "
"existing_nullable= argument to avoid implicit conversion "
"of NOT NULL columns to NULL."
)
used_default = False
if sqla_compat._server_default_is_identity(
server_default, existing_server_default
) or sqla_compat._server_default_is_computed(
server_default, existing_server_default
):
used_default = True
kw["server_default"] = server_default
kw["existing_server_default"] = existing_server_default
super().alter_column(
table_name,
column_name,
nullable=nullable,
type_=type_,
schema=schema,
existing_type=existing_type,
existing_nullable=existing_nullable,
**kw,
)
if server_default is not False and used_default is False:
if existing_server_default is not False or server_default is None:
self._exec(
_ExecDropConstraint(
table_name,
column_name,
"sys.default_constraints",
schema,
)
)
if server_default is not None:
super().alter_column(
table_name,
column_name,
schema=schema,
server_default=server_default,
)
if name is not None:
super().alter_column(
table_name, column_name, schema=schema, name=name
)
def create_index(self, index: Index, **kw: Any) -> None:
        # "mssql_include" likely defaults to None if not present, so get()
        # should normally not need to return the fallback value; being
        # defensive in any case
mssql_include = index.kwargs.get("mssql_include", None) or ()
assert index.table is not None
for col in mssql_include:
if col not in index.table.c:
index.table.append_column(Column(col, sqltypes.NullType))
self._exec(CreateIndex(index, **kw))
def bulk_insert( # type:ignore[override]
self, table: Union[TableClause, Table], rows: List[dict], **kw: Any
) -> None:
if self.as_sql:
self._exec(
"SET IDENTITY_INSERT %s ON"
% self.dialect.identifier_preparer.format_table(table)
)
super().bulk_insert(table, rows, **kw)
self._exec(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(table)
)
else:
super().bulk_insert(table, rows, **kw)
def drop_column(
self,
table_name: str,
column: Column[Any],
schema: Optional[str] = None,
**kw,
) -> None:
drop_default = kw.pop("mssql_drop_default", False)
if drop_default:
self._exec(
_ExecDropConstraint(
table_name, column, "sys.default_constraints", schema
)
)
drop_check = kw.pop("mssql_drop_check", False)
if drop_check:
self._exec(
_ExecDropConstraint(
table_name, column, "sys.check_constraints", schema
)
)
drop_fks = kw.pop("mssql_drop_foreign_key", False)
if drop_fks:
self._exec(_ExecDropFKConstraint(table_name, column, schema))
super().drop_column(table_name, column, schema=schema, **kw)
def compare_server_default(
self,
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default,
):
if rendered_metadata_default is not None:
rendered_metadata_default = re.sub(
r"[\(\) \"\']", "", rendered_metadata_default
)
if rendered_inspector_default is not None:
            # SQL Server collapses whitespace and adds arbitrary parentheses
            # within expressions. our only option is to collapse all of it
rendered_inspector_default = re.sub(
r"[\(\) \"\']", "", rendered_inspector_default
)
return rendered_inspector_default != rendered_metadata_default
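    # Worked example (assumed values): a reflected default "(getdate())" and
    # a metadata default "getdate()" both collapse to "getdate" after the
    # substitutions above, so no difference is reported.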
def _compare_identity_default(self, metadata_identity, inspector_identity):
diff, ignored, is_alter = super()._compare_identity_default(
metadata_identity, inspector_identity
)
if (
metadata_identity is None
and inspector_identity is not None
and not diff
and inspector_identity.column is not None
and inspector_identity.column.primary_key
):
            # mssql reflects primary keys with autoincrement as identity
            # columns. if no differing attributes are present, ignore them
is_alter = False
return diff, ignored, is_alter
def adjust_reflected_dialect_options(
self, reflected_object: Dict[str, Any], kind: str
) -> Dict[str, Any]:
options: Dict[str, Any]
options = reflected_object.get("dialect_options", {}).copy()
if not options.get("mssql_include"):
options.pop("mssql_include", None)
if not options.get("mssql_clustered"):
options.pop("mssql_clustered", None)
return options
class _ExecDropConstraint(Executable, ClauseElement):
inherit_cache = False
def __init__(
self,
tname: str,
colname: Union[Column[Any], str],
type_: str,
schema: Optional[str],
) -> None:
self.tname = tname
self.colname = colname
self.type_ = type_
self.schema = schema
class _ExecDropFKConstraint(Executable, ClauseElement):
inherit_cache = False
def __init__(
self, tname: str, colname: Column[Any], schema: Optional[str]
) -> None:
self.tname = tname
self.colname = colname
self.schema = schema
@compiles(_ExecDropConstraint, "mssql")
def _exec_drop_col_constraint(
element: _ExecDropConstraint, compiler: MSSQLCompiler, **kw
) -> str:
schema, tname, colname, type_ = (
element.schema,
element.tname,
element.colname,
element.type_,
)
# from http://www.mssqltips.com/sqlservertip/1425/\
# working-with-default-constraints-in-sql-server/
return """declare @const_name varchar(256)
select @const_name = QUOTENAME([name]) from %(type)s
where parent_object_id = object_id('%(schema_dot)s%(tname)s')
and col_name(parent_object_id, parent_column_id) = '%(colname)s'
exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
"type": type_,
"tname": tname,
"colname": colname,
"tname_quoted": format_table_name(compiler, tname, schema),
"schema_dot": schema + "." if schema else "",
}
@compiles(_ExecDropFKConstraint, "mssql")
def _exec_drop_col_fk_constraint(
element: _ExecDropFKConstraint, compiler: MSSQLCompiler, **kw
) -> str:
schema, tname, colname = element.schema, element.tname, element.colname
return """declare @const_name varchar(256)
select @const_name = QUOTENAME([name]) from
sys.foreign_keys fk join sys.foreign_key_columns fkc
on fk.object_id=fkc.constraint_object_id
where fkc.parent_object_id = object_id('%(schema_dot)s%(tname)s')
and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s'
exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
"tname": tname,
"colname": colname,
"tname_quoted": format_table_name(compiler, tname, schema),
"schema_dot": schema + "." if schema else "",
}
@compiles(AddColumn, "mssql")
def visit_add_column(element: AddColumn, compiler: MSDDLCompiler, **kw) -> str:
return "%s %s" % (
alter_table(compiler, element.table_name, element.schema),
mssql_add_column(compiler, element.column, **kw),
)
def mssql_add_column(
compiler: MSDDLCompiler, column: Column[Any], **kw
) -> str:
return "ADD %s" % compiler.get_column_specification(column, **kw)
@compiles(ColumnNullable, "mssql")
def visit_column_nullable(
element: ColumnNullable, compiler: MSDDLCompiler, **kw
) -> str:
return "%s %s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
format_type(compiler, element.existing_type), # type: ignore[arg-type]
"NULL" if element.nullable else "NOT NULL",
)
@compiles(ColumnDefault, "mssql")
def visit_column_default(
element: ColumnDefault, compiler: MSDDLCompiler, **kw
) -> str:
# TODO: there can also be a named constraint
# with ADD CONSTRAINT here
return "%s ADD DEFAULT %s FOR %s" % (
alter_table(compiler, element.table_name, element.schema),
format_server_default(compiler, element.default),
format_column_name(compiler, element.column_name),
)
@compiles(ColumnName, "mssql")
def visit_rename_column(
element: ColumnName, compiler: MSDDLCompiler, **kw
) -> str:
return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % (
format_table_name(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
)
@compiles(ColumnType, "mssql")
def visit_column_type(
element: ColumnType, compiler: MSDDLCompiler, **kw
) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
format_type(compiler, element.type_),
)
@compiles(RenameTable, "mssql")
def visit_rename_table(
element: RenameTable, compiler: MSDDLCompiler, **kw
) -> str:
return "EXEC sp_rename '%s', %s" % (
format_table_name(compiler, element.table_name, element.schema),
format_table_name(compiler, element.new_table_name, None),
)
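# Illustrative output (hypothetical names): renaming "old_widgets" to
# "widgets" in schema "dbo" compiles via visit_rename_table() to roughly
#   EXEC sp_rename 'dbo.old_widgets', widgets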
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import re
from typing import Any
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import schema
from sqlalchemy import types as sqltypes
from .base import alter_table
from .base import AlterColumn
from .base import ColumnDefault
from .base import ColumnName
from .base import ColumnNullable
from .base import ColumnType
from .base import format_column_name
from .base import format_server_default
from .impl import DefaultImpl
from .. import util
from ..util import sqla_compat
from ..util.sqla_compat import _is_type_bound
from ..util.sqla_compat import compiles
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.dialects.mysql.base import MySQLDDLCompiler
from sqlalchemy.sql.ddl import DropConstraint
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.type_api import TypeEngine
from .base import _ServerDefault
class MySQLImpl(DefaultImpl):
__dialect__ = "mysql"
transactional_ddl = False
type_synonyms = DefaultImpl.type_synonyms + (
{"BOOL", "TINYINT"},
{"JSON", "LONGTEXT"},
)
type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"]
def alter_column( # type:ignore[override]
self,
table_name: str,
column_name: str,
nullable: Optional[bool] = None,
server_default: Union[_ServerDefault, Literal[False]] = False,
name: Optional[str] = None,
type_: Optional[TypeEngine] = None,
schema: Optional[str] = None,
existing_type: Optional[TypeEngine] = None,
existing_server_default: Optional[_ServerDefault] = None,
existing_nullable: Optional[bool] = None,
autoincrement: Optional[bool] = None,
existing_autoincrement: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
existing_comment: Optional[str] = None,
**kw: Any,
) -> None:
if sqla_compat._server_default_is_identity(
server_default, existing_server_default
) or sqla_compat._server_default_is_computed(
server_default, existing_server_default
):
            # modifying computed or identity columns is not supported;
            # the default implementation will raise
super().alter_column(
table_name,
column_name,
nullable=nullable,
type_=type_,
schema=schema,
existing_type=existing_type,
existing_nullable=existing_nullable,
server_default=server_default,
existing_server_default=existing_server_default,
**kw,
)
if name is not None or self._is_mysql_allowed_functional_default(
type_ if type_ is not None else existing_type, server_default
):
self._exec(
MySQLChangeColumn(
table_name,
column_name,
schema=schema,
newname=name if name is not None else column_name,
nullable=(
nullable
if nullable is not None
else (
existing_nullable
if existing_nullable is not None
else True
)
),
type_=type_ if type_ is not None else existing_type,
default=(
server_default
if server_default is not False
else existing_server_default
),
autoincrement=(
autoincrement
if autoincrement is not None
else existing_autoincrement
),
comment=(
comment if comment is not False else existing_comment
),
)
)
elif (
nullable is not None
or type_ is not None
or autoincrement is not None
or comment is not False
):
self._exec(
MySQLModifyColumn(
table_name,
column_name,
schema=schema,
newname=name if name is not None else column_name,
nullable=(
nullable
if nullable is not None
else (
existing_nullable
if existing_nullable is not None
else True
)
),
type_=type_ if type_ is not None else existing_type,
default=(
server_default
if server_default is not False
else existing_server_default
),
autoincrement=(
autoincrement
if autoincrement is not None
else existing_autoincrement
),
comment=(
comment if comment is not False else existing_comment
),
)
)
elif server_default is not False:
self._exec(
MySQLAlterDefault(
table_name, column_name, server_default, schema=schema
)
)
def drop_constraint(
self,
const: Constraint,
) -> None:
if isinstance(const, schema.CheckConstraint) and _is_type_bound(const):
return
super().drop_constraint(const)
def _is_mysql_allowed_functional_default(
self,
type_: Optional[TypeEngine],
server_default: Union[_ServerDefault, Literal[False]],
) -> bool:
return (
type_ is not None
and type_._type_affinity is sqltypes.DateTime
and server_default is not None
)
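    # Hedged illustration: a DATETIME column passed with
    # server_default=text("CURRENT_TIMESTAMP") satisfies this check, so
    # alter_column() above routes through the full CHANGE/MODIFY COLUMN
    # form instead of MySQLAlterDefault.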
def compare_server_default(
self,
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default,
):
# partially a workaround for SQLAlchemy issue #3023; if the
# column were created without "NOT NULL", MySQL may have added
# an implicit default of '0' which we need to skip
# TODO: this is not really covered anymore ?
if (
metadata_column.type._type_affinity is sqltypes.Integer
and inspector_column.primary_key
and not inspector_column.autoincrement
and not rendered_metadata_default
and rendered_inspector_default == "'0'"
):
return False
elif (
rendered_inspector_default
and inspector_column.type._type_affinity is sqltypes.Integer
):
rendered_inspector_default = (
re.sub(r"^'|'$", "", rendered_inspector_default)
if rendered_inspector_default is not None
else None
)
return rendered_inspector_default != rendered_metadata_default
elif (
rendered_metadata_default
and metadata_column.type._type_affinity is sqltypes.String
):
metadata_default = re.sub(r"^'|'$", "", rendered_metadata_default)
return rendered_inspector_default != f"'{metadata_default}'"
elif rendered_inspector_default and rendered_metadata_default:
# adjust for "function()" vs. "FUNCTION" as can occur particularly
# for the CURRENT_TIMESTAMP function on newer MariaDB versions
# SQLAlchemy MySQL dialect bundles ON UPDATE into the server
# default; adjust for this possibly being present.
onupdate_ins = re.match(
r"(.*) (on update.*?)(?:\(\))?$",
rendered_inspector_default.lower(),
)
onupdate_met = re.match(
r"(.*) (on update.*?)(?:\(\))?$",
rendered_metadata_default.lower(),
)
if onupdate_ins:
if not onupdate_met:
return True
elif onupdate_ins.group(2) != onupdate_met.group(2):
return True
rendered_inspector_default = onupdate_ins.group(1)
rendered_metadata_default = onupdate_met.group(1)
return re.sub(
r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower()
) != re.sub(
r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower()
)
else:
return rendered_inspector_default != rendered_metadata_default
def correct_for_autogen_constraints(
self,
conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes,
):
# TODO: if SQLA 1.0, make use of "duplicates_index"
# metadata
removed = set()
for idx in list(conn_indexes):
if idx.unique:
continue
            # MySQL puts implicit indexes on FK columns, even if
            # composite and even if MyISAM, so we can't check this easily.
# the name of the index may be the column name or it may
# be the name of the FK constraint.
for col in idx.columns:
if idx.name == col.name:
conn_indexes.remove(idx)
removed.add(idx.name)
break
for fk in col.foreign_keys:
if fk.name == idx.name:
conn_indexes.remove(idx)
removed.add(idx.name)
break
if idx.name in removed:
break
# then remove indexes from the "metadata_indexes"
# that we've removed from reflected, otherwise they come out
# as adds (see #202)
for idx in list(metadata_indexes):
if idx.name in removed:
metadata_indexes.remove(idx)
def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
conn_fk_by_sig = {
self._create_reflected_constraint_sig(fk).unnamed_no_options: fk
for fk in conn_fks
}
metadata_fk_by_sig = {
self._create_metadata_constraint_sig(fk).unnamed_no_options: fk
for fk in metadata_fks
}
for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
mdfk = metadata_fk_by_sig[sig]
cnfk = conn_fk_by_sig[sig]
# MySQL considers RESTRICT to be the default and doesn't
# report on it. if the model has explicit RESTRICT and
# the conn FK has None, set it to RESTRICT
if (
mdfk.ondelete is not None
and mdfk.ondelete.lower() == "restrict"
and cnfk.ondelete is None
):
cnfk.ondelete = "RESTRICT"
if (
mdfk.onupdate is not None
and mdfk.onupdate.lower() == "restrict"
and cnfk.onupdate is None
):
cnfk.onupdate = "RESTRICT"
class MariaDBImpl(MySQLImpl):
__dialect__ = "mariadb"
class MySQLAlterDefault(AlterColumn):
def __init__(
self,
name: str,
column_name: str,
default: _ServerDefault,
schema: Optional[str] = None,
) -> None:
super(AlterColumn, self).__init__(name, schema=schema)
self.column_name = column_name
self.default = default
class MySQLChangeColumn(AlterColumn):
def __init__(
self,
name: str,
column_name: str,
schema: Optional[str] = None,
newname: Optional[str] = None,
type_: Optional[TypeEngine] = None,
nullable: Optional[bool] = None,
default: Optional[Union[_ServerDefault, Literal[False]]] = False,
autoincrement: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
) -> None:
super(AlterColumn, self).__init__(name, schema=schema)
self.column_name = column_name
self.nullable = nullable
self.newname = newname
self.default = default
self.autoincrement = autoincrement
self.comment = comment
if type_ is None:
raise util.CommandError(
"All MySQL CHANGE/MODIFY COLUMN operations "
"require the existing type."
)
self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
pass
@compiles(ColumnNullable, "mysql", "mariadb")
@compiles(ColumnName, "mysql", "mariadb")
@compiles(ColumnDefault, "mysql", "mariadb")
@compiles(ColumnType, "mysql", "mariadb")
def _mysql_doesnt_support_individual(element, compiler, **kw):
raise NotImplementedError(
"Individual alter column constructs not supported by MySQL"
)
@compiles(MySQLAlterDefault, "mysql", "mariadb")
def _mysql_alter_default(
element: MySQLAlterDefault, compiler: MySQLDDLCompiler, **kw
) -> str:
return "%s ALTER COLUMN %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
(
"SET DEFAULT %s" % format_server_default(compiler, element.default)
if element.default is not None
else "DROP DEFAULT"
),
)
@compiles(MySQLModifyColumn, "mysql", "mariadb")
def _mysql_modify_column(
element: MySQLModifyColumn, compiler: MySQLDDLCompiler, **kw
) -> str:
return "%s MODIFY %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
_mysql_colspec(
compiler,
nullable=element.nullable,
server_default=element.default,
type_=element.type_,
autoincrement=element.autoincrement,
comment=element.comment,
),
)
@compiles(MySQLChangeColumn, "mysql", "mariadb")
def _mysql_change_column(
element: MySQLChangeColumn, compiler: MySQLDDLCompiler, **kw
) -> str:
return "%s CHANGE %s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
_mysql_colspec(
compiler,
nullable=element.nullable,
server_default=element.default,
type_=element.type_,
autoincrement=element.autoincrement,
comment=element.comment,
),
)
def _mysql_colspec(
compiler: MySQLDDLCompiler,
nullable: Optional[bool],
server_default: Optional[Union[_ServerDefault, Literal[False]]],
type_: TypeEngine,
autoincrement: Optional[bool],
comment: Optional[Union[str, Literal[False]]],
) -> str:
spec = "%s %s" % (
compiler.dialect.type_compiler.process(type_),
"NULL" if nullable else "NOT NULL",
)
if autoincrement:
spec += " AUTO_INCREMENT"
if server_default is not False and server_default is not None:
spec += " DEFAULT %s" % format_server_default(compiler, server_default)
if comment:
spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value(
comment, sqltypes.String()
)
return spec
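# Illustrative sketch (assumed inputs): _mysql_colspec() for an INTEGER
# type with nullable=False, server_default="0", autoincrement=None and
# comment=None returns roughly "INTEGER NOT NULL DEFAULT 0".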
@compiles(schema.DropConstraint, "mysql", "mariadb")
def _mysql_drop_constraint(
element: DropConstraint, compiler: MySQLDDLCompiler, **kw
) -> str:
"""Redefine SQLAlchemy's drop constraint to
raise errors for invalid constraint type."""
constraint = element.element
if isinstance(
constraint,
(
schema.ForeignKeyConstraint,
schema.PrimaryKeyConstraint,
schema.UniqueConstraint,
),
):
assert not kw
return compiler.visit_drop_constraint(element)
elif isinstance(constraint, schema.CheckConstraint):
# note that SQLAlchemy as of 1.2 does not yet support
# DROP CONSTRAINT for MySQL/MariaDB, so we implement fully
# here.
if compiler.dialect.is_mariadb: # type: ignore[attr-defined]
return "ALTER TABLE %s DROP CONSTRAINT %s" % (
compiler.preparer.format_table(constraint.table),
compiler.preparer.format_constraint(constraint),
)
else:
return "ALTER TABLE %s DROP CHECK %s" % (
compiler.preparer.format_table(constraint.table),
compiler.preparer.format_constraint(constraint),
)
else:
raise NotImplementedError(
"No generic 'DROP CONSTRAINT' in MySQL - "
"please specify constraint type"
)
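# Hedged example: dropping a CheckConstraint named "ck_positive" on table
# "t" emits "ALTER TABLE t DROP CONSTRAINT ck_positive" on MariaDB and
# "ALTER TABLE t DROP CHECK ck_positive" on MySQL, per the branches above.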
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import re
from typing import Any
from typing import Optional
from typing import TYPE_CHECKING
from sqlalchemy.sql import sqltypes
from .base import AddColumn
from .base import alter_table
from .base import ColumnComment
from .base import ColumnDefault
from .base import ColumnName
from .base import ColumnNullable
from .base import ColumnType
from .base import format_column_name
from .base import format_server_default
from .base import format_table_name
from .base import format_type
from .base import IdentityColumnDefault
from .base import RenameTable
from .impl import DefaultImpl
from ..util.sqla_compat import compiles
if TYPE_CHECKING:
from sqlalchemy.dialects.oracle.base import OracleDDLCompiler
from sqlalchemy.engine.cursor import CursorResult
from sqlalchemy.sql.schema import Column
class OracleImpl(DefaultImpl):
__dialect__ = "oracle"
transactional_ddl = False
batch_separator = "/"
command_terminator = ""
type_synonyms = DefaultImpl.type_synonyms + (
{"VARCHAR", "VARCHAR2"},
{"BIGINT", "INTEGER", "SMALLINT", "DECIMAL", "NUMERIC", "NUMBER"},
{"DOUBLE", "FLOAT", "DOUBLE_PRECISION"},
)
identity_attrs_ignore = ()
def __init__(self, *arg, **kw) -> None:
super().__init__(*arg, **kw)
self.batch_separator = self.context_opts.get(
"oracle_batch_separator", self.batch_separator
)
def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]:
result = super()._exec(construct, *args, **kw)
if self.as_sql and self.batch_separator:
self.static_output(self.batch_separator)
return result
def compare_server_default(
self,
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default,
):
if rendered_metadata_default is not None:
rendered_metadata_default = re.sub(
r"^\((.+)\)$", r"\1", rendered_metadata_default
)
rendered_metadata_default = re.sub(
r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
)
if rendered_inspector_default is not None:
rendered_inspector_default = re.sub(
r"^\((.+)\)$", r"\1", rendered_inspector_default
)
rendered_inspector_default = re.sub(
r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
)
rendered_inspector_default = rendered_inspector_default.strip()
return rendered_inspector_default != rendered_metadata_default
def emit_begin(self) -> None:
self._exec("SET TRANSACTION READ WRITE")
def emit_commit(self) -> None:
self._exec("COMMIT")
@compiles(AddColumn, "oracle")
def visit_add_column(
element: AddColumn, compiler: OracleDDLCompiler, **kw
) -> str:
return "%s %s" % (
alter_table(compiler, element.table_name, element.schema),
add_column(compiler, element.column, **kw),
)
@compiles(ColumnNullable, "oracle")
def visit_column_nullable(
element: ColumnNullable, compiler: OracleDDLCompiler, **kw
) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
"NULL" if element.nullable else "NOT NULL",
)
@compiles(ColumnType, "oracle")
def visit_column_type(
element: ColumnType, compiler: OracleDDLCompiler, **kw
) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
"%s" % format_type(compiler, element.type_),
)
@compiles(ColumnName, "oracle")
def visit_column_name(
element: ColumnName, compiler: OracleDDLCompiler, **kw
) -> str:
return "%s RENAME COLUMN %s TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
)
@compiles(ColumnDefault, "oracle")
def visit_column_default(
element: ColumnDefault, compiler: OracleDDLCompiler, **kw
) -> str:
return "%s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
(
"DEFAULT %s" % format_server_default(compiler, element.default)
if element.default is not None
else "DEFAULT NULL"
),
)
@compiles(ColumnComment, "oracle")
def visit_column_comment(
element: ColumnComment, compiler: OracleDDLCompiler, **kw
) -> str:
ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}"
comment = compiler.sql_compiler.render_literal_value(
(element.comment if element.comment is not None else ""),
sqltypes.String(),
)
return ddl.format(
table_name=element.table_name,
column_name=element.column_name,
comment=comment,
)
@compiles(RenameTable, "oracle")
def visit_rename_table(
element: RenameTable, compiler: OracleDDLCompiler, **kw
) -> str:
return "%s RENAME TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_table_name(compiler, element.new_table_name, None),
)
def alter_column(compiler: OracleDDLCompiler, name: str) -> str:
return "MODIFY %s" % format_column_name(compiler, name)
def add_column(compiler: OracleDDLCompiler, column: Column[Any], **kw) -> str:
return "ADD %s" % compiler.get_column_specification(column, **kw)
@compiles(IdentityColumnDefault, "oracle")
def visit_identity_column(
element: IdentityColumnDefault, compiler: OracleDDLCompiler, **kw
):
text = "%s %s " % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
)
if element.default is None:
# drop identity
text += "DROP IDENTITY"
return text
else:
text += compiler.visit_identity_column(element.default)
return text
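# Illustrative output (hypothetical column): dropping the identity on
# "accounts.id" compiles to roughly
#   ALTER TABLE accounts MODIFY id DROP IDENTITY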
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import logging
import re
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import Identity
from sqlalchemy import literal_column
from sqlalchemy import Numeric
from sqlalchemy import select
from sqlalchemy import text
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql import BIGINT
from sqlalchemy.dialects.postgresql import ExcludeConstraint
from sqlalchemy.dialects.postgresql import INTEGER
from sqlalchemy.schema import CreateIndex
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.functions import FunctionElement
from sqlalchemy.types import NULLTYPE
from .base import alter_column
from .base import alter_table
from .base import AlterColumn
from .base import ColumnComment
from .base import format_column_name
from .base import format_table_name
from .base import format_type
from .base import IdentityColumnDefault
from .base import RenameTable
from .impl import ComparisonResult
from .impl import DefaultImpl
from .. import util
from ..autogenerate import render
from ..operations import ops
from ..operations import schemaobj
from ..operations.base import BatchOperations
from ..operations.base import Operations
from ..util import sqla_compat
from ..util.sqla_compat import compiles
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy import Index
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql.array import ARRAY
from sqlalchemy.dialects.postgresql.base import PGDDLCompiler
from sqlalchemy.dialects.postgresql.hstore import HSTORE
from sqlalchemy.dialects.postgresql.json import JSON
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql.elements import ColumnElement
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.type_api import TypeEngine
from .base import _ServerDefault
from ..autogenerate.api import AutogenContext
from ..autogenerate.render import _f_name
from ..runtime.migration import MigrationContext
log = logging.getLogger(__name__)
class PostgresqlImpl(DefaultImpl):
__dialect__ = "postgresql"
transactional_ddl = True
type_synonyms = DefaultImpl.type_synonyms + (
{"FLOAT", "DOUBLE PRECISION"},
)
def create_index(self, index: Index, **kw: Any) -> None:
        # "postgresql_include" likely defaults to None if not present, so
        # get() should normally not need to return the fallback value;
        # being defensive in any case
postgresql_include = index.kwargs.get("postgresql_include", None) or ()
for col in postgresql_include:
if col not in index.table.c: # type: ignore[union-attr]
index.table.append_column( # type: ignore[union-attr]
Column(col, sqltypes.NullType)
)
self._exec(CreateIndex(index, **kw))
def prep_table_for_batch(self, batch_impl, table):
for constraint in table.constraints:
if (
constraint.name is not None
and constraint.name in batch_impl.named_constraints
):
self.drop_constraint(constraint)
def compare_server_default(
self,
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default,
):
# don't do defaults for SERIAL columns
if (
metadata_column.primary_key
and metadata_column is metadata_column.table._autoincrement_column
):
return False
conn_col_default = rendered_inspector_default
defaults_equal = conn_col_default == rendered_metadata_default
if defaults_equal:
return False
if None in (
conn_col_default,
rendered_metadata_default,
metadata_column.server_default,
):
return not defaults_equal
metadata_default = metadata_column.server_default.arg
if isinstance(metadata_default, str):
if not isinstance(inspector_column.type, (Numeric, Float)):
metadata_default = re.sub(r"^'|'$", "", metadata_default)
metadata_default = f"'{metadata_default}'"
metadata_default = literal_column(metadata_default)
# run a real compare against the server
conn = self.connection
assert conn is not None
return not conn.scalar(
select(literal_column(conn_col_default) == metadata_default)
)
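    # Hedged example: a reflected default "'5'::integer" vs. a metadata
    # default "5" can't be compared textually, so the SELECT above asks the
    # server whether the two expressions are equal and trusts its answer.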
def alter_column( # type:ignore[override]
self,
table_name: str,
column_name: str,
nullable: Optional[bool] = None,
server_default: Union[_ServerDefault, Literal[False]] = False,
name: Optional[str] = None,
type_: Optional[TypeEngine] = None,
schema: Optional[str] = None,
autoincrement: Optional[bool] = None,
existing_type: Optional[TypeEngine] = None,
existing_server_default: Optional[_ServerDefault] = None,
existing_nullable: Optional[bool] = None,
existing_autoincrement: Optional[bool] = None,
**kw: Any,
) -> None:
using = kw.pop("postgresql_using", None)
if using is not None and type_ is None:
raise util.CommandError(
"postgresql_using must be used with the type_ parameter"
)
if type_ is not None:
self._exec(
PostgresqlColumnType(
table_name,
column_name,
type_,
schema=schema,
using=using,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
)
)
super().alter_column(
table_name,
column_name,
nullable=nullable,
server_default=server_default,
name=name,
schema=schema,
autoincrement=autoincrement,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_autoincrement=existing_autoincrement,
**kw,
)
def autogen_column_reflect(self, inspector, table, column_info):
if column_info.get("default") and isinstance(
column_info["type"], (INTEGER, BIGINT)
):
seq_match = re.match(
r"nextval\('(.+?)'::regclass\)", column_info["default"]
)
if seq_match:
info = sqla_compat._exec_on_inspector(
inspector,
text(
"select c.relname, a.attname "
"from pg_class as c join "
"pg_depend d on d.objid=c.oid and "
"d.classid='pg_class'::regclass and "
"d.refclassid='pg_class'::regclass "
"join pg_class t on t.oid=d.refobjid "
"join pg_attribute a on a.attrelid=t.oid and "
"a.attnum=d.refobjsubid "
"where c.relkind='S' and "
"c.oid=cast(:seqname as regclass)"
),
seqname=seq_match.group(1),
).first()
if info:
seqname, colname = info
if colname == column_info["name"]:
log.info(
"Detected sequence named '%s' as "
"owned by integer column '%s(%s)', "
"assuming SERIAL and omitting",
seqname,
table.name,
colname,
)
                        # sequence, and the owner is this column,
                        # it's a SERIAL - whack it!
del column_info["default"]
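        # Worked example (assumed reflection): an INTEGER column "id" with
        # default "nextval('accounts_id_seq'::regclass)" matches the regex
        # above; if pg_depend shows the sequence is owned by that same
        # column, the default is dropped so autogenerate renders a plain
        # integer primary key instead of a spurious server_default.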
def correct_for_autogen_constraints(
self,
conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes,
):
doubled_constraints = {
index
for index in conn_indexes
if index.info.get("duplicates_constraint")
}
for ix in doubled_constraints:
conn_indexes.remove(ix)
if not sqla_compat.sqla_2:
self._skip_functional_indexes(metadata_indexes, conn_indexes)
# pg behavior regarding modifiers
# | # | compiled sql | returned sql | regexp. group is removed |
# | - | ---------------- | -----------------| ------------------------ |
# | 1 | nulls first | nulls first | - |
# | 2 | nulls last | | (?<! desc)( nulls last)$ |
# | 3 | asc | | ( asc)$ |
# | 4 | asc nulls first | nulls first | ( asc) nulls first$ |
# | 5 | asc nulls last | | ( asc nulls last)$ |
# | 6 | desc | desc | - |
# | 7 | desc nulls first | desc | desc( nulls first)$ |
# | 8 | desc nulls last | desc nulls last | - |
_default_modifiers_re = ( # order of case 2 and 5 matters
re.compile("( asc nulls last)$"), # case 5
re.compile("(?<! desc)( nulls last)$"), # case 2
re.compile("( asc)$"), # case 3
re.compile("( asc) nulls first$"), # case 4
re.compile(" desc( nulls first)$"), # case 7
)
def _cleanup_index_expr(self, index: Index, expr: str) -> str:
expr = expr.lower().replace('"', "").replace("'", "")
if index.table is not None:
# should not be needed, since include_table=False is in compile
expr = expr.replace(f"{index.table.name.lower()}.", "")
if "::" in expr:
# strip :: cast. types can have spaces in them
expr = re.sub(r"(::[\w ]+\w)", "", expr)
while expr and expr[0] == "(" and expr[-1] == ")":
expr = expr[1:-1]
# NOTE: when parsing the connection expression this cleanup could
# be skipped
for rs in self._default_modifiers_re:
if match := rs.search(expr):
start, end = match.span(1)
expr = expr[:start] + expr[end:]
break
while expr and expr[0] == "(" and expr[-1] == ")":
expr = expr[1:-1]
# strip casts
cast_re = re.compile(r"cast\s*\(")
if cast_re.match(expr):
expr = cast_re.sub("", expr)
            # remove the trailing "as <type>)" of the cast
expr = re.sub(r"as\s+[^)]+\)", "", expr)
# remove spaces
expr = expr.replace(" ", "")
return expr
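    # Worked example (hedged): an expression compiled as
    #   (lower(name::text) ASC NULLS LAST)
    # is normalized above to "lower(name)": quotes and the "::text" cast are
    # stripped, the default "asc nulls last" modifier is removed, and the
    # outer parentheses plus whitespace are dropped.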
def _dialect_options(
self, item: Union[Index, UniqueConstraint]
) -> Tuple[Any, ...]:
        # only the positive case is returned by sqlalchemy reflection, so
        # None and False are treated the same
if item.dialect_kwargs.get("postgresql_nulls_not_distinct"):
return ("nulls_not_distinct",)
return ()
def compare_indexes(
self,
metadata_index: Index,
reflected_index: Index,
) -> ComparisonResult:
msg = []
unique_msg = self._compare_index_unique(
metadata_index, reflected_index
)
if unique_msg:
msg.append(unique_msg)
m_exprs = metadata_index.expressions
r_exprs = reflected_index.expressions
if len(m_exprs) != len(r_exprs):
msg.append(f"expression number {len(r_exprs)} to {len(m_exprs)}")
if msg:
# no point going further, return early
return ComparisonResult.Different(msg)
skip = []
for pos, (m_e, r_e) in enumerate(zip(m_exprs, r_exprs), 1):
m_compile = self._compile_element(m_e)
m_text = self._cleanup_index_expr(metadata_index, m_compile)
# print(f"META ORIG: {m_compile!r} CLEANUP: {m_text!r}")
r_compile = self._compile_element(r_e)
r_text = self._cleanup_index_expr(metadata_index, r_compile)
# print(f"CONN ORIG: {r_compile!r} CLEANUP: {r_text!r}")
if m_text == r_text:
                continue  # these expressions are equal
elif m_compile.strip().endswith("_ops") and (
" " in m_compile or ")" in m_compile # is an expression
):
skip.append(
f"expression #{pos} {m_compile!r} detected "
"as including operator clause."
)
util.warn(
f"Expression #{pos} {m_compile!r} in index "
f"{reflected_index.name!r} detected to include "
"an operator clause. Expression compare cannot proceed. "
"Please move the operator clause to the "
"``postgresql_ops`` dict to enable proper compare "
"of the index expressions: "
"https://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#operator-classes", # noqa: E501
)
else:
msg.append(f"expression #{pos} {r_compile!r} to {m_compile!r}")
m_options = self._dialect_options(metadata_index)
r_options = self._dialect_options(reflected_index)
if m_options != r_options:
msg.extend(f"options {r_options} to {m_options}")
if msg:
return ComparisonResult.Different(msg)
elif skip:
# if there are other changes detected don't skip the index
return ComparisonResult.Skip(skip)
else:
return ComparisonResult.Equal()
def compare_unique_constraint(
self,
metadata_constraint: UniqueConstraint,
reflected_constraint: UniqueConstraint,
) -> ComparisonResult:
metadata_tup = self._create_metadata_constraint_sig(
metadata_constraint
)
reflected_tup = self._create_reflected_constraint_sig(
reflected_constraint
)
meta_sig = metadata_tup.unnamed
conn_sig = reflected_tup.unnamed
if conn_sig != meta_sig:
return ComparisonResult.Different(
f"expression {conn_sig} to {meta_sig}"
)
metadata_do = self._dialect_options(metadata_tup.const)
conn_do = self._dialect_options(reflected_tup.const)
if metadata_do != conn_do:
return ComparisonResult.Different(
f"expression {conn_do} to {metadata_do}"
)
return ComparisonResult.Equal()
def adjust_reflected_dialect_options(
self, reflected_options: Dict[str, Any], kind: str
) -> Dict[str, Any]:
options: Dict[str, Any]
options = reflected_options.get("dialect_options", {}).copy()
if not options.get("postgresql_include"):
options.pop("postgresql_include", None)
return options
def _compile_element(self, element: Union[ClauseElement, str]) -> str:
if isinstance(element, str):
return element
return element.compile(
dialect=self.dialect,
compile_kwargs={"literal_binds": True, "include_table": False},
).string
def render_ddl_sql_expr(
self,
expr: ClauseElement,
is_server_default: bool = False,
is_index: bool = False,
**kw: Any,
) -> str:
"""Render a SQL expression that is typically a server default,
index expression, etc.
"""
# apply self_group to index expressions;
# see https://github.com/sqlalchemy/sqlalchemy/blob/
# 82fa95cfce070fab401d020c6e6e4a6a96cc2578/
# lib/sqlalchemy/dialects/postgresql/base.py#L2261
if is_index and not isinstance(expr, ColumnClause):
expr = expr.self_group()
return super().render_ddl_sql_expr(
expr, is_server_default=is_server_default, is_index=is_index, **kw
)
def render_type(
self, type_: TypeEngine, autogen_context: AutogenContext
) -> Union[str, Literal[False]]:
mod = type(type_).__module__
if not mod.startswith("sqlalchemy.dialects.postgresql"):
return False
if hasattr(self, "_render_%s_type" % type_.__visit_name__):
meth = getattr(self, "_render_%s_type" % type_.__visit_name__)
return meth(type_, autogen_context)
return False
def _render_HSTORE_type(
self, type_: HSTORE, autogen_context: AutogenContext
) -> str:
return cast(
str,
render._render_type_w_subtype(
type_, autogen_context, "text_type", r"(.+?\(.*text_type=)"
),
)
def _render_ARRAY_type(
self, type_: ARRAY, autogen_context: AutogenContext
) -> str:
return cast(
str,
render._render_type_w_subtype(
type_, autogen_context, "item_type", r"(.+?\()"
),
)
def _render_JSON_type(
self, type_: JSON, autogen_context: AutogenContext
) -> str:
return cast(
str,
render._render_type_w_subtype(
type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)"
),
)
def _render_JSONB_type(
self, type_: JSONB, autogen_context: AutogenContext
) -> str:
return cast(
str,
render._render_type_w_subtype(
type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)"
),
)
class PostgresqlColumnType(AlterColumn):
def __init__(
self, name: str, column_name: str, type_: TypeEngine, **kw
) -> None:
using = kw.pop("using", None)
super().__init__(name, column_name, **kw)
self.type_ = sqltypes.to_instance(type_)
self.using = using
@compiles(RenameTable, "postgresql")
def visit_rename_table(
element: RenameTable, compiler: PGDDLCompiler, **kw
) -> str:
return "%s RENAME TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_table_name(compiler, element.new_table_name, None),
)
@compiles(PostgresqlColumnType, "postgresql")
def visit_column_type(
element: PostgresqlColumnType, compiler: PGDDLCompiler, **kw
) -> str:
return "%s %s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
"TYPE %s" % format_type(compiler, element.type_),
"USING %s" % element.using if element.using else "",
)
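# Illustrative DDL emitted by the two visitors above (schema, table, and
# column names are hypothetical):
#   ALTER TABLE myschema.old_name RENAME TO new_name
#   ALTER TABLE t ALTER COLUMN c TYPE INTEGER USING c::integer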
@compiles(ColumnComment, "postgresql")
def visit_column_comment(
element: ColumnComment, compiler: PGDDLCompiler, **kw
) -> str:
ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}"
comment = (
compiler.sql_compiler.render_literal_value(
element.comment, sqltypes.String()
)
if element.comment is not None
else "NULL"
)
return ddl.format(
table_name=format_table_name(
compiler, element.table_name, element.schema
),
column_name=format_column_name(compiler, element.column_name),
comment=comment,
)
@compiles(IdentityColumnDefault, "postgresql")
def visit_identity_column(
element: IdentityColumnDefault, compiler: PGDDLCompiler, **kw
):
text = "%s %s " % (
alter_table(compiler, element.table_name, element.schema),
alter_column(compiler, element.column_name),
)
if element.default is None:
# drop identity
text += "DROP IDENTITY"
return text
elif element.existing_server_default is None:
# add identity options
text += "ADD "
text += compiler.visit_identity_column(element.default)
return text
else:
# alter identity
diff, _, _ = element.impl._compare_identity_default(
element.default, element.existing_server_default
)
identity = element.default
for attr in sorted(diff):
if attr == "always":
text += "SET GENERATED %s " % (
"ALWAYS" if identity.always else "BY DEFAULT"
)
else:
text += "SET %s " % compiler.get_identity_options(
Identity(**{attr: getattr(identity, attr)})
)
return text
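# Illustrative DDL for the three identity branches above (names and options
# are hypothetical):
#   drop:  ALTER TABLE t ALTER COLUMN id DROP IDENTITY
#   add:   ALTER TABLE t ALTER COLUMN id ADD GENERATED BY DEFAULT AS IDENTITY
#   alter: ALTER TABLE t ALTER COLUMN id SET GENERATED ALWAYS SET INCREMENT BY 2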
@Operations.register_operation("create_exclude_constraint")
@BatchOperations.register_operation(
"create_exclude_constraint", "batch_create_exclude_constraint"
)
@ops.AddConstraintOp.register_add_constraint("exclude_constraint")
class CreateExcludeConstraintOp(ops.AddConstraintOp):
"""Represent a create exclude constraint operation."""
constraint_type = "exclude"
def __init__(
self,
constraint_name: sqla_compat._ConstraintName,
table_name: Union[str, quoted_name],
elements: Union[
Sequence[Tuple[str, str]],
Sequence[Tuple[ColumnClause[Any], str]],
],
where: Optional[Union[ColumnElement[bool], str]] = None,
schema: Optional[str] = None,
_orig_constraint: Optional[ExcludeConstraint] = None,
**kw,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.elements = elements
self.where = where
self.schema = schema
self._orig_constraint = _orig_constraint
self.kw = kw
@classmethod
def from_constraint( # type:ignore[override]
cls, constraint: ExcludeConstraint
) -> CreateExcludeConstraintOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
return cls(
constraint.name,
constraint_table.name,
[ # type: ignore
(expr, op) for expr, name, op in constraint._render_exprs
],
where=cast("ColumnElement[bool] | None", constraint.where),
schema=constraint_table.schema,
_orig_constraint=constraint,
deferrable=constraint.deferrable,
initially=constraint.initially,
using=constraint.using,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> ExcludeConstraint:
if self._orig_constraint is not None:
return self._orig_constraint
schema_obj = schemaobj.SchemaObjects(migration_context)
t = schema_obj.table(self.table_name, schema=self.schema)
excl = ExcludeConstraint(
*self.elements,
name=self.constraint_name,
where=self.where,
**self.kw,
)
for (
expr,
name,
oper,
) in excl._render_exprs:
t.append_column(Column(name, NULLTYPE))
t.append_constraint(excl)
return excl
@classmethod
def create_exclude_constraint(
cls,
operations: Operations,
constraint_name: str,
table_name: str,
*elements: Any,
**kw: Any,
) -> Optional[Table]:
"""Issue an alter to create an EXCLUDE constraint using the
current migration context.
.. note:: This method is Postgresql specific, and additionally
requires at least SQLAlchemy 1.0.
e.g.::
from alembic import op
op.create_exclude_constraint(
"user_excl",
"user",
("period", "&&"),
("group", "="),
where=("group != 'some group'"),
)
Note that the expressions work the same way as those of
the ``ExcludeConstraint`` object itself; if plain strings are
passed, quoting rules must be applied manually.
:param name: Name of the constraint.
:param table_name: String name of the source table.
:param elements: exclude conditions.
:param where: SQL expression or SQL string with optional WHERE
clause.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within.
"""
op = cls(constraint_name, table_name, elements, **kw)
return operations.invoke(op)
@classmethod
def batch_create_exclude_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
*elements: Any,
**kw: Any,
) -> Optional[Table]:
"""Issue a "create exclude constraint" instruction using the
current batch migration context.
.. note:: This method is Postgresql specific, and additionally
requires at least SQLAlchemy 1.0.
.. seealso::
:meth:`.Operations.create_exclude_constraint`
"""
kw["schema"] = operations.impl.schema
op = cls(constraint_name, operations.impl.table_name, elements, **kw)
return operations.invoke(op)
@render.renderers.dispatch_for(CreateExcludeConstraintOp)
def _add_exclude_constraint(
autogen_context: AutogenContext, op: CreateExcludeConstraintOp
) -> str:
return _exclude_constraint(op.to_constraint(), autogen_context, alter=True)
@render._constraint_renderers.dispatch_for(ExcludeConstraint)
def _render_inline_exclude_constraint(
constraint: ExcludeConstraint,
autogen_context: AutogenContext,
namespace_metadata: MetaData,
) -> str:
rendered = render._user_defined_render(
"exclude", constraint, autogen_context
)
if rendered is not False:
return rendered
return _exclude_constraint(constraint, autogen_context, False)
def _postgresql_autogenerate_prefix(autogen_context: AutogenContext) -> str:
imports = autogen_context.imports
if imports is not None:
imports.add("from sqlalchemy.dialects import postgresql")
return "postgresql."
def _exclude_constraint(
constraint: ExcludeConstraint,
autogen_context: AutogenContext,
alter: bool,
) -> str:
opts: List[Tuple[str, Union[quoted_name, str, _f_name, None]]] = []
has_batch = autogen_context._has_batch
if constraint.deferrable:
opts.append(("deferrable", str(constraint.deferrable)))
if constraint.initially:
opts.append(("initially", str(constraint.initially)))
if constraint.using:
opts.append(("using", str(constraint.using)))
if not has_batch and alter and constraint.table.schema:
opts.append(("schema", render._ident(constraint.table.schema)))
if not alter and constraint.name:
opts.append(
("name", render._render_gen_name(autogen_context, constraint.name))
)
def do_expr_where_opts():
args = [
"(%s, %r)"
% (
_render_potential_column(
sqltext, # type:ignore[arg-type]
autogen_context,
),
opstring,
)
for sqltext, name, opstring in constraint._render_exprs
]
if constraint.where is not None:
args.append(
"where=%s"
% render._render_potential_expr(
constraint.where, autogen_context
)
)
args.extend(["%s=%r" % (k, v) for k, v in opts])
return args
if alter:
args = [
repr(render._render_gen_name(autogen_context, constraint.name))
]
if not has_batch:
args += [repr(render._ident(constraint.table.name))]
args.extend(do_expr_where_opts())
return "%(prefix)screate_exclude_constraint(%(args)s)" % {
"prefix": render._alembic_autogenerate_prefix(autogen_context),
"args": ", ".join(args),
}
else:
args = do_expr_where_opts()
return "%(prefix)sExcludeConstraint(%(args)s)" % {
"prefix": _postgresql_autogenerate_prefix(autogen_context),
"args": ", ".join(args),
}
def _render_potential_column(
value: Union[
ColumnClause[Any], Column[Any], TextClause, FunctionElement[Any]
],
autogen_context: AutogenContext,
) -> str:
if isinstance(value, ColumnClause):
if value.is_literal:
# like literal_column("int8range(from, to)") in ExcludeConstraint
template = "%(prefix)sliteral_column(%(name)r)"
else:
template = "%(prefix)scolumn(%(name)r)"
return template % {
"prefix": render._sqlalchemy_autogenerate_prefix(autogen_context),
"name": value.name,
}
else:
return render._render_potential_expr(
value,
autogen_context,
wrap_in_element=isinstance(value, (TextClause, FunctionElement)),
)
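# Usage sketch tying the pieces above together: how the EXCLUDE constraint
# operation is typically reached from a migration script. A minimal
# illustration only; the function, table, and constraint names are
# hypothetical, and a configured migration context is assumed.
def _example_exclude_upgrade() -> None:
    from alembic import op

    op.create_exclude_constraint(
        "excl_room_booking_overlap",  # hypothetical constraint name
        "room_booking",  # hypothetical table
        ("room_id", "="),
        ("during", "&&"),
        using="gist",
    )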
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import re
from typing import Any
from typing import Dict
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import cast
from sqlalchemy import Computed
from sqlalchemy import JSON
from sqlalchemy import schema
from sqlalchemy import sql
from .base import alter_table
from .base import ColumnName
from .base import format_column_name
from .base import format_table_name
from .base import RenameTable
from .impl import DefaultImpl
from .. import util
from ..util.sqla_compat import compiles
if TYPE_CHECKING:
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.compiler import DDLCompiler
from sqlalchemy.sql.elements import Cast
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.type_api import TypeEngine
from ..operations.batch import BatchOperationsImpl
class SQLiteImpl(DefaultImpl):
__dialect__ = "sqlite"
transactional_ddl = False
"""SQLite supports transactional DDL, but pysqlite does not:
see: http://bugs.python.org/issue10740
"""
def requires_recreate_in_batch(
self, batch_op: BatchOperationsImpl
) -> bool:
"""Return True if the given :class:`.BatchOperationsImpl`
would need the table to be recreated and copied in order to
proceed.
Normally, only returns True on SQLite when operations other
than add_column are present.
"""
for op in batch_op.batch:
if op[0] == "add_column":
col = op[1][1]
if isinstance(
col.server_default, schema.DefaultClause
) and isinstance(col.server_default.arg, sql.ClauseElement):
return True
elif (
isinstance(col.server_default, Computed)
and col.server_default.persisted
):
return True
elif op[0] not in ("create_index", "drop_index"):
return True
else:
return False
def add_constraint(self, const: Constraint):
# attempt to distinguish between an
# auto-gen constraint and an explicit one
if const._create_rule is None:
raise NotImplementedError(
"No support for ALTER of constraints in SQLite dialect. "
"Please refer to the batch mode feature which allows for "
"SQLite migrations using a copy-and-move strategy."
)
elif const._create_rule(self):
util.warn(
"Skipping unsupported ALTER for "
"creation of implicit constraint. "
"Please refer to the batch mode feature which allows for "
"SQLite migrations using a copy-and-move strategy."
)
def drop_constraint(self, const: Constraint):
if const._create_rule is None:
raise NotImplementedError(
"No support for ALTER of constraints in SQLite dialect. "
"Please refer to the batch mode feature which allows for "
"SQLite migrations using a copy-and-move strategy."
)
def compare_server_default(
self,
inspector_column: Column[Any],
metadata_column: Column[Any],
rendered_metadata_default: Optional[str],
rendered_inspector_default: Optional[str],
) -> bool:
if rendered_metadata_default is not None:
rendered_metadata_default = re.sub(
r"^\((.+)\)$", r"\1", rendered_metadata_default
)
rendered_metadata_default = re.sub(
r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
)
if rendered_inspector_default is not None:
rendered_inspector_default = re.sub(
r"^\((.+)\)$", r"\1", rendered_inspector_default
)
rendered_inspector_default = re.sub(
r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
)
return rendered_inspector_default != rendered_metadata_default
def _guess_if_default_is_unparenthesized_sql_expr(
self, expr: Optional[str]
) -> bool:
"""Determine if a server default is a SQL expression or a constant.
There are too many assertions that expect server defaults to round-trip
identically without parenthesis added, so we will add parens only in
very specific cases.
"""
if not expr:
return False
elif re.match(r"^[0-9\.]$", expr):
return False
elif re.match(r"^'.+'$", expr):
return False
elif re.match(r"^\(.+\)$", expr):
return False
else:
return True
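# Illustrative results of the heuristic above, derived from the regexes
# (example values are hypothetical):
#   None / ""            -> False  (no default present)
#   "7"                  -> False  (single numeric character)
#   "'hello'"            -> False  (quoted string literal)
#   "(1 + 2)"            -> False  (already parenthesized)
#   "CURRENT_TIMESTAMP"  -> True   (bare SQL expression; parens get added)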
def autogen_column_reflect(
self,
inspector: Inspector,
table: Table,
column_info: Dict[str, Any],
) -> None:
# SQLite expression defaults require parenthesis when sent
# as DDL
if self._guess_if_default_is_unparenthesized_sql_expr(
column_info.get("default", None)
):
column_info["default"] = "(%s)" % (column_info["default"],)
def render_ddl_sql_expr(
self, expr: ClauseElement, is_server_default: bool = False, **kw
) -> str:
# SQLite expression defaults require parenthesis when sent
# as DDL
str_expr = super().render_ddl_sql_expr(
expr, is_server_default=is_server_default, **kw
)
if (
is_server_default
and self._guess_if_default_is_unparenthesized_sql_expr(str_expr)
):
str_expr = "(%s)" % (str_expr,)
return str_expr
def cast_for_batch_migrate(
self,
existing: Column[Any],
existing_transfer: Dict[str, Union[TypeEngine, Cast]],
new_type: TypeEngine,
) -> None:
if (
existing.type._type_affinity is not new_type._type_affinity
and not isinstance(new_type, JSON)
):
existing_transfer["expr"] = cast(
existing_transfer["expr"], new_type
)
def correct_for_autogen_constraints(
self,
conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes,
):
self._skip_functional_indexes(metadata_indexes, conn_indexes)
@compiles(RenameTable, "sqlite")
def visit_rename_table(
element: RenameTable, compiler: DDLCompiler, **kw
) -> str:
return "%s RENAME TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_table_name(compiler, element.new_table_name, None),
)
@compiles(ColumnName, "sqlite")
def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str:
return "%s RENAME COLUMN %s TO %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
)
# @compiles(AddColumn, 'sqlite')
# def visit_add_column(element, compiler, **kw):
# return "%s %s" % (
# alter_table(compiler, element.table_name, element.schema),
# add_column(compiler, element.column, **kw)
# )
# def add_column(compiler, column, **kw):
# text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
# need to modify SQLAlchemy so that the CHECK associated with a Boolean
# or Enum gets placed as part of the column constraints, not the Table
# see ticket 98
# for const in column.constraints:
# text += compiler.process(AddConstraint(const))
# return text
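# Usage sketch: since add_constraint()/drop_constraint() above refuse direct
# ALTER on SQLite, constraint changes typically go through batch mode, which
# recreates and copies the table. A minimal illustration; the table, column,
# and constraint names are hypothetical, and a configured migration context
# is assumed.
def _example_sqlite_upgrade() -> None:
    from alembic import op

    with op.batch_alter_table("account") as batch_op:
        batch_op.create_unique_constraint("uq_account_email", ["email"])
        batch_op.drop_column("legacy_flag")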
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
from typing import Any
from typing import ClassVar
from typing import Dict
from typing import Generic
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import UniqueConstraint
from typing_extensions import TypeGuard
from .. import util
from ..util import sqla_compat
if TYPE_CHECKING:
from typing import Literal
from alembic.autogenerate.api import AutogenContext
from alembic.ddl.impl import DefaultImpl
CompareConstraintType = Union[Constraint, Index]
_C = TypeVar("_C", bound=CompareConstraintType)
_clsreg: Dict[str, Type[_constraint_sig]] = {}
class ComparisonResult(NamedTuple):
status: Literal["equal", "different", "skip"]
message: str
@property
def is_equal(self) -> bool:
return self.status == "equal"
@property
def is_different(self) -> bool:
return self.status == "different"
@property
def is_skip(self) -> bool:
return self.status == "skip"
@classmethod
def Equal(cls) -> ComparisonResult:
"""the constraints are equal."""
return cls("equal", "The two constraints are equal")
@classmethod
def Different(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult:
"""the constraints are different for the provided reason(s)."""
return cls("different", ", ".join(util.to_list(reason)))
@classmethod
def Skip(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult:
"""the constraint cannot be compared for the provided reason(s).
The message is logged, but the constraints will be otherwise
considered equal, meaning that no migration command will be
generated.
"""
return cls("skip", ", ".join(util.to_list(reason)))
class _constraint_sig(Generic[_C]):
const: _C
_sig: Tuple[Any, ...]
name: Optional[sqla_compat._ConstraintNameDefined]
impl: DefaultImpl
_is_index: ClassVar[bool] = False
_is_fk: ClassVar[bool] = False
_is_uq: ClassVar[bool] = False
_is_metadata: bool
def __init_subclass__(cls) -> None:
cls._register()
@classmethod
def _register(cls):
raise NotImplementedError()
def __init__(
self, is_metadata: bool, impl: DefaultImpl, const: _C
) -> None:
raise NotImplementedError()
def compare_to_reflected(
self, other: _constraint_sig[Any]
) -> ComparisonResult:
assert self.impl is other.impl
assert self._is_metadata
assert not other._is_metadata
return self._compare_to_reflected(other)
def _compare_to_reflected(
self, other: _constraint_sig[_C]
) -> ComparisonResult:
raise NotImplementedError()
@classmethod
def from_constraint(
cls, is_metadata: bool, impl: DefaultImpl, constraint: _C
) -> _constraint_sig[_C]:
# these could be cached by constraint/impl; however, if the
# constraint is modified in place, the sig becomes stale. The MySQL
# impl currently does this, and even if that were fixed, we can't be
# sure another impl wouldn't do the same, so play it safe.
sig = _clsreg[constraint.__visit_name__](is_metadata, impl, constraint)
return sig
def md_name_to_sql_name(self, context: AutogenContext) -> Optional[str]:
return sqla_compat._get_constraint_final_name(
self.const, context.dialect
)
@util.memoized_property
def is_named(self):
return sqla_compat._constraint_is_named(self.const, self.impl.dialect)
@util.memoized_property
def unnamed(self) -> Tuple[Any, ...]:
return self._sig
@util.memoized_property
def unnamed_no_options(self) -> Tuple[Any, ...]:
raise NotImplementedError()
@util.memoized_property
def _full_sig(self) -> Tuple[Any, ...]:
return (self.name,) + self.unnamed
def __eq__(self, other) -> bool:
return self._full_sig == other._full_sig
def __ne__(self, other) -> bool:
return self._full_sig != other._full_sig
def __hash__(self) -> int:
return hash(self._full_sig)
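# Note on the registry pattern above: __init_subclass__ causes each concrete
# signature class to register itself in _clsreg under the SQLAlchemy
# __visit_name__ of the construct it handles ("unique_constraint", "index",
# "foreign_key_constraint"), which is what lets from_constraint() dispatch
# without a hand-maintained mapping.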
class _uq_constraint_sig(_constraint_sig[UniqueConstraint]):
_is_uq = True
@classmethod
def _register(cls) -> None:
_clsreg["unique_constraint"] = cls
is_unique = True
def __init__(
self,
is_metadata: bool,
impl: DefaultImpl,
const: UniqueConstraint,
) -> None:
self.impl = impl
self.const = const
self.name = sqla_compat.constraint_name_or_none(const.name)
self._sig = tuple(sorted([col.name for col in const.columns]))
self._is_metadata = is_metadata
@property
def column_names(self) -> Tuple[str, ...]:
return tuple([col.name for col in self.const.columns])
def _compare_to_reflected(
self, other: _constraint_sig[_C]
) -> ComparisonResult:
assert self._is_metadata
metadata_obj = self
conn_obj = other
assert is_uq_sig(conn_obj)
return self.impl.compare_unique_constraint(
metadata_obj.const, conn_obj.const
)
class _ix_constraint_sig(_constraint_sig[Index]):
_is_index = True
name: sqla_compat._ConstraintName
@classmethod
def _register(cls) -> None:
_clsreg["index"] = cls
def __init__(
self, is_metadata: bool, impl: DefaultImpl, const: Index
) -> None:
self.impl = impl
self.const = const
self.name = const.name
self.is_unique = bool(const.unique)
self._is_metadata = is_metadata
def _compare_to_reflected(
self, other: _constraint_sig[_C]
) -> ComparisonResult:
assert self._is_metadata
metadata_obj = self
conn_obj = other
assert is_index_sig(conn_obj)
return self.impl.compare_indexes(metadata_obj.const, conn_obj.const)
@util.memoized_property
def has_expressions(self):
return sqla_compat.is_expression_index(self.const)
@util.memoized_property
def column_names(self) -> Tuple[str, ...]:
return tuple([col.name for col in self.const.columns])
@util.memoized_property
def column_names_optional(self) -> Tuple[Optional[str], ...]:
return tuple(
[getattr(col, "name", None) for col in self.const.expressions]
)
@util.memoized_property
def is_named(self):
return True
@util.memoized_property
def unnamed(self):
return (self.is_unique,) + self.column_names_optional
class _fk_constraint_sig(_constraint_sig[ForeignKeyConstraint]):
_is_fk = True
@classmethod
def _register(cls) -> None:
_clsreg["foreign_key_constraint"] = cls
def __init__(
self,
is_metadata: bool,
impl: DefaultImpl,
const: ForeignKeyConstraint,
) -> None:
self._is_metadata = is_metadata
self.impl = impl
self.const = const
self.name = sqla_compat.constraint_name_or_none(const.name)
(
self.source_schema,
self.source_table,
self.source_columns,
self.target_schema,
self.target_table,
self.target_columns,
onupdate,
ondelete,
deferrable,
initially,
) = sqla_compat._fk_spec(const)
self._sig: Tuple[Any, ...] = (
self.source_schema,
self.source_table,
tuple(self.source_columns),
self.target_schema,
self.target_table,
tuple(self.target_columns),
) + (
(
(None if onupdate.lower() == "no action" else onupdate.lower())
if onupdate
else None
),
(
(None if ondelete.lower() == "no action" else ondelete.lower())
if ondelete
else None
),
# convert initially + deferrable into one three-state value
(
"initially_deferrable"
if initially and initially.lower() == "deferred"
else "deferrable" if deferrable else "not deferrable"
),
)
@util.memoized_property
def unnamed_no_options(self):
return (
self.source_schema,
self.source_table,
tuple(self.source_columns),
self.target_schema,
self.target_table,
tuple(self.target_columns),
)
def is_index_sig(sig: _constraint_sig) -> TypeGuard[_ix_constraint_sig]:
return sig._is_index
def is_uq_sig(sig: _constraint_sig) -> TypeGuard[_uq_constraint_sig]:
return sig._is_uq
def is_fk_sig(sig: _constraint_sig) -> TypeGuard[_fk_constraint_sig]:
return sig._is_fk
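# Sketch of what the TypeGuard helpers above provide: a type checker narrows
# the generic _constraint_sig to the concrete subclass inside each branch.
# The helper below is hypothetical, for illustration only.
def _describe_sig(sig: _constraint_sig[Any]) -> str:
    if is_index_sig(sig):
        return f"index (unique={sig.is_unique})"
    elif is_uq_sig(sig):
        return f"unique constraint on {sig.column_names}"
    elif is_fk_sig(sig):
        return f"fk {sig.source_table} -> {sig.target_table}"
    else:
        return "unknown constraint"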
|
from . import mssql
from . import mysql
from . import oracle
from . import postgresql
from . import sqlite
from .impl import DefaultImpl as DefaultImpl
|
# mypy: allow-untyped-calls
from __future__ import annotations
from contextlib import contextmanager
import re
import textwrap
from typing import Any
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List # noqa
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Sequence # noqa
from typing import Tuple
from typing import Type # noqa
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy.sql.elements import conv
from . import batch
from . import schemaobj
from .. import util
from ..util import sqla_compat
from ..util.compat import formatannotation_fwdref
from ..util.compat import inspect_formatargspec
from ..util.compat import inspect_getfullargspec
from ..util.sqla_compat import _literal_bindparam
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy import Table
from sqlalchemy.engine import Connection
from sqlalchemy.sql import Executable
from sqlalchemy.sql.expression import ColumnElement
from sqlalchemy.sql.expression import TableClause
from sqlalchemy.sql.expression import TextClause
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Computed
from sqlalchemy.sql.schema import Identity
from sqlalchemy.sql.schema import SchemaItem
from sqlalchemy.types import TypeEngine
from .batch import BatchOperationsImpl
from .ops import AddColumnOp
from .ops import AddConstraintOp
from .ops import AlterColumnOp
from .ops import AlterTableOp
from .ops import BulkInsertOp
from .ops import CreateIndexOp
from .ops import CreateTableCommentOp
from .ops import CreateTableOp
from .ops import DropColumnOp
from .ops import DropConstraintOp
from .ops import DropIndexOp
from .ops import DropTableCommentOp
from .ops import DropTableOp
from .ops import ExecuteSQLOp
from .ops import MigrateOperation
from ..ddl import DefaultImpl
from ..runtime.migration import MigrationContext
__all__ = ("Operations", "BatchOperations")
_T = TypeVar("_T")
_C = TypeVar("_C", bound=Callable[..., Any])
class AbstractOperations(util.ModuleClsProxy):
"""Base class for Operations and BatchOperations.
.. versionadded:: 1.11.0
"""
impl: Union[DefaultImpl, BatchOperationsImpl]
_to_impl = util.Dispatcher()
def __init__(
self,
migration_context: MigrationContext,
impl: Optional[BatchOperationsImpl] = None,
) -> None:
"""Construct a new :class:`.Operations`
:param migration_context: a :class:`.MigrationContext`
instance.
"""
self.migration_context = migration_context
if impl is None:
self.impl = migration_context.impl
else:
self.impl = impl
self.schema_obj = schemaobj.SchemaObjects(migration_context)
@classmethod
def register_operation(
cls, name: str, sourcename: Optional[str] = None
) -> Callable[[Type[_T]], Type[_T]]:
"""Register a new operation for this class.
This method is normally used to add new operations
to the :class:`.Operations` class, and possibly the
:class:`.BatchOperations` class as well. All Alembic migration
operations are implemented via this system, however the system
is also available as a public API to facilitate adding custom
operations.
.. seealso::
:ref:`operation_plugins`
"""
def register(op_cls: Type[_T]) -> Type[_T]:
if sourcename is None:
fn = getattr(op_cls, name)
source_name = fn.__name__
else:
fn = getattr(op_cls, sourcename)
source_name = fn.__name__
spec = inspect_getfullargspec(fn)
name_args = spec[0]
assert name_args[0:2] == ["cls", "operations"]
name_args[0:2] = ["self"]
args = inspect_formatargspec(
*spec, formatannotation=formatannotation_fwdref
)
num_defaults = len(spec[3]) if spec[3] else 0
defaulted_vals: Tuple[Any, ...]
if num_defaults:
defaulted_vals = tuple(name_args[0 - num_defaults :])
else:
defaulted_vals = ()
defaulted_vals += tuple(spec[4])
# here, we are using formatargspec in a different way in order
# to get a string that will re-apply incoming arguments to a new
# function call
apply_kw = inspect_formatargspec(
name_args + spec[4],
spec[1],
spec[2],
defaulted_vals,
formatvalue=lambda x: "=" + x,
formatannotation=formatannotation_fwdref,
)
args = re.sub(
r'[_]?ForwardRef\(([\'"].+?[\'"])\)',
lambda m: m.group(1),
args,
)
func_text = textwrap.dedent(
"""\
def %(name)s%(args)s:
%(doc)r
return op_cls.%(source_name)s%(apply_kw)s
"""
% {
"name": name,
"source_name": source_name,
"args": args,
"apply_kw": apply_kw,
"doc": fn.__doc__,
}
)
globals_ = dict(globals())
globals_.update({"op_cls": op_cls})
lcl: Dict[str, Any] = {}
exec(func_text, globals_, lcl)
setattr(cls, name, lcl[name])
fn.__func__.__doc__ = (
"This method is proxied on "
"the :class:`.%s` class, via the :meth:`.%s.%s` method."
% (cls.__name__, cls.__name__, name)
)
if hasattr(fn, "_legacy_translations"):
lcl[name]._legacy_translations = fn._legacy_translations
return op_cls
return register
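# Sketch of the extensibility recipe register_operation() enables (assumed,
# following the :ref:`operation_plugins` pattern; the names below are
# hypothetical):
#
#   @Operations.register_operation("refresh_view")
#   class RefreshViewOp(MigrateOperation):
#       def __init__(self, name):
#           self.name = name
#
#       @classmethod
#       def refresh_view(cls, operations, name):
#           return operations.invoke(cls(name))
#
# after which migration scripts may call ``op.refresh_view("some_view")``.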
@classmethod
def implementation_for(cls, op_cls: Any) -> Callable[[_C], _C]:
"""Register an implementation for a given :class:`.MigrateOperation`.
This is part of the operation extensibility API.
.. seealso::
:ref:`operation_plugins` - example of use
"""
def decorate(fn: _C) -> _C:
cls._to_impl.dispatch_for(op_cls)(fn)
return fn
return decorate
@classmethod
@contextmanager
def context(
cls, migration_context: MigrationContext
) -> Iterator[Operations]:
op = Operations(migration_context)
op._install_proxy()
yield op
op._remove_proxy()
@contextmanager
def batch_alter_table(
self,
table_name: str,
schema: Optional[str] = None,
recreate: Literal["auto", "always", "never"] = "auto",
partial_reordering: Optional[Tuple[Any, ...]] = None,
copy_from: Optional[Table] = None,
table_args: Tuple[Any, ...] = (),
table_kwargs: Mapping[str, Any] = util.immutabledict(),
reflect_args: Tuple[Any, ...] = (),
reflect_kwargs: Mapping[str, Any] = util.immutabledict(),
naming_convention: Optional[Dict[str, str]] = None,
) -> Iterator[BatchOperations]:
"""Invoke a series of per-table migrations in batch.
Batch mode allows a series of operations specific to a table
to be syntactically grouped together, and allows for alternate
modes of table migration, in particular the "recreate" style of
migration required by SQLite.
"recreate" style is as follows:
1. A new table is created with the new specification, based on the
migration directives within the batch, using a temporary name.
2. The data is copied from the existing table to the new table.
3. The existing table is dropped.
4. The new table is renamed to the existing table name.
The directive by default will only use "recreate" style on the
SQLite backend, and only if directives are present which require
this form, e.g. anything other than ``add_column()``. The batch
operation on other backends will proceed using standard ALTER TABLE
operations.
The method is used as a context manager, which returns an instance
of :class:`.BatchOperations`; this object is the same as
:class:`.Operations` except that table names and schema names
are omitted. E.g.::
with op.batch_alter_table("some_table") as batch_op:
batch_op.add_column(Column("foo", Integer))
batch_op.drop_column("bar")
The operations within the context manager are invoked at once
when the context is ended. When run against SQLite, if the
migrations include operations not supported by SQLite's ALTER TABLE,
the entire table will be copied to a new one with the new
specification, moving all data across as well.
The copy operation by default uses reflection to retrieve the current
structure of the table, and therefore :meth:`.batch_alter_table`
in this mode requires that the migration is run in "online" mode.
The ``copy_from`` parameter may be passed which refers to an existing
:class:`.Table` object, which will bypass this reflection step.
.. note:: The table copy operation will currently not copy
CHECK constraints, and may not copy UNIQUE constraints that are
unnamed, as is possible on SQLite. See the section
:ref:`sqlite_batch_constraints` for workarounds.
:param table_name: name of table
:param schema: optional schema name.
:param recreate: under what circumstances the table should be
recreated. At its default of ``"auto"``, the SQLite dialect will
recreate the table if any operations other than ``add_column()``,
``create_index()``, or ``drop_index()`` are
present. Other options include ``"always"`` and ``"never"``.
:param copy_from: optional :class:`~sqlalchemy.schema.Table` object
that will act as the structure of the table being copied. If omitted,
table reflection is used to retrieve the structure of the table.
.. seealso::
:ref:`batch_offline_mode`
:paramref:`~.Operations.batch_alter_table.reflect_args`
:paramref:`~.Operations.batch_alter_table.reflect_kwargs`
:param reflect_args: a sequence of additional positional arguments that
will be applied to the table structure being reflected / copied;
this may be used to pass column and constraint overrides to the
table that will be reflected, in lieu of passing the whole
:class:`~sqlalchemy.schema.Table` using
:paramref:`~.Operations.batch_alter_table.copy_from`.
:param reflect_kwargs: a dictionary of additional keyword arguments
that will be applied to the table structure being copied; this may be
used to pass additional table and reflection options to the table that
will be reflected, in lieu of passing the whole
:class:`~sqlalchemy.schema.Table` using
:paramref:`~.Operations.batch_alter_table.copy_from`.
:param table_args: a sequence of additional positional arguments that
will be applied to the new :class:`~sqlalchemy.schema.Table` when
created, in addition to those copied from the source table.
This may be used to provide additional constraints such as CHECK
constraints that may not be reflected.
:param table_kwargs: a dictionary of additional keyword arguments
that will be applied to the new :class:`~sqlalchemy.schema.Table`
when created, in addition to those copied from the source table.
This may be used to provide for additional table options that may
not be reflected.
:param naming_convention: a naming convention dictionary of the form
described at :ref:`autogen_naming_conventions` which will be applied
to the :class:`~sqlalchemy.schema.MetaData` during the reflection
process. This is typically required if one wants to drop SQLite
constraints, as these constraints will not have names when
reflected on this backend. Requires SQLAlchemy **0.9.4** or greater.
.. seealso::
:ref:`dropping_sqlite_foreign_keys`
:param partial_reordering: a list of tuples, each suggesting a desired
ordering of two or more columns in the newly created table. Requires
that :paramref:`.batch_alter_table.recreate` is set to ``"always"``.
Examples, given a table with columns "a", "b", "c", and "d":
Specify the order of all columns::
with op.batch_alter_table(
"some_table",
recreate="always",
partial_reordering=[("c", "d", "a", "b")],
) as batch_op:
pass
Ensure "d" appears before "c", and "b", appears before "a"::
with op.batch_alter_table(
"some_table",
recreate="always",
partial_reordering=[("d", "c"), ("b", "a")],
) as batch_op:
pass
The ordering of columns not included in the partial_reordering
set is undefined; therefore it is best to specify the complete
ordering of all columns.
.. note:: batch mode requires SQLAlchemy 0.8 or above.
.. seealso::
:ref:`batch_migrations`
"""
impl = batch.BatchOperationsImpl(
self,
table_name,
schema,
recreate,
copy_from,
table_args,
table_kwargs,
reflect_args,
reflect_kwargs,
naming_convention,
partial_reordering,
)
batch_op = BatchOperations(self.migration_context, impl=impl)
yield batch_op
impl.flush()
def get_context(self) -> MigrationContext:
"""Return the :class:`.MigrationContext` object that's
currently in use.
"""
return self.migration_context
@overload
def invoke(self, operation: CreateTableOp) -> Table: ...
@overload
def invoke(
self,
operation: Union[
AddConstraintOp,
DropConstraintOp,
CreateIndexOp,
DropIndexOp,
AddColumnOp,
AlterColumnOp,
AlterTableOp,
CreateTableCommentOp,
DropTableCommentOp,
DropColumnOp,
BulkInsertOp,
DropTableOp,
ExecuteSQLOp,
],
) -> None: ...
@overload
def invoke(self, operation: MigrateOperation) -> Any: ...
def invoke(self, operation: MigrateOperation) -> Any:
"""Given a :class:`.MigrateOperation`, invoke it in terms of
this :class:`.Operations` instance.
"""
fn = self._to_impl.dispatch(
operation, self.migration_context.impl.__dialect__
)
return fn(self, operation)
def f(self, name: str) -> conv:
"""Indicate a string name that has already had a naming convention
applied to it.
This feature combines with the SQLAlchemy ``naming_convention`` feature
to disambiguate constraint names that have already had naming
conventions applied to them, versus those that have not. This is
necessary in the case that the ``"%(constraint_name)s"`` token
is used within a naming convention, so that it can be identified
that this particular name should remain fixed.
If the :meth:`.Operations.f` is used on a constraint, the naming
convention will not take effect::
op.add_column("t", "x", Boolean(name=op.f("ck_bool_t_x")))
Above, the CHECK constraint generated will have the name
``ck_bool_t_x`` regardless of whether or not a naming convention is
in use.
Alternatively, if a naming convention is in use, and 'f' is not used,
names will be converted according to the convention. If the
``target_metadata`` contains the naming convention
``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the
output of the following::
op.add_column("t", "x", Boolean(name="x"))
will be::
CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))
The function is rendered in the output of autogenerate when
a particular constraint name is already converted.
"""
return conv(name)
def inline_literal(
self, value: Union[str, int], type_: Optional[TypeEngine[Any]] = None
) -> _literal_bindparam:
r"""Produce an 'inline literal' expression, suitable for
using in an INSERT, UPDATE, or DELETE statement.
When using Alembic in "offline" mode, CRUD operations
aren't compatible with SQLAlchemy's default behavior surrounding
literal values,
which is that they are converted into bound values and passed
separately into the ``execute()`` method of the DBAPI cursor.
An offline SQL
script needs to have these rendered inline. While it should
always be noted that inline literal values are an **enormous**
security hole in an application that handles untrusted input,
a schema migration is not run in this context, so
literals are safe to render inline, with the caveat that
advanced types like dates may not be supported directly
by SQLAlchemy.
See :meth:`.Operations.execute` for an example usage of
:meth:`.Operations.inline_literal`.
The environment can also be configured to attempt to render
"literal" values inline automatically, for those simple types
that are supported by the dialect; see
:paramref:`.EnvironmentContext.configure.literal_binds` for this
more recently added feature.
:param value: The value to render. Strings, integers, and simple
numerics should be supported. Other types like boolean,
dates, etc. may or may not be supported yet by various
backends.
:param type\_: optional - a :class:`sqlalchemy.types.TypeEngine`
subclass stating the type of this value. In SQLAlchemy
expressions, this is usually derived automatically
from the Python type of the value itself, as well as
based on the context in which the value is used.
.. seealso::
:paramref:`.EnvironmentContext.configure.literal_binds`
"""
return sqla_compat._literal_bindparam(None, value, type_=type_)
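# A hedged illustration of the above (table and values are hypothetical):
#   op.execute(
#       account.update()
#       .where(account.c.name == op.inline_literal("account 1"))
#       .values({"name": op.inline_literal("account 2")})
#   )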
def get_bind(self) -> Connection:
"""Return the current 'bind'.
Under normal circumstances, this is the
:class:`~sqlalchemy.engine.Connection` currently being used
to emit SQL to the database.
In a SQL script context, this value is ``None``. [TODO: verify this]
"""
return self.migration_context.impl.bind # type: ignore[return-value]
def run_async(
self,
async_function: Callable[..., Awaitable[_T]],
*args: Any,
**kw_args: Any,
) -> _T:
"""Invoke the given asynchronous callable, passing an asynchronous
:class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first
argument.
This method allows calling async functions from within the
synchronous ``upgrade()`` or ``downgrade()`` alembic migration
method.
The async connection passed to the callable shares the same
transaction as the connection running in the migration context.
Any additional arg or kw_arg passed to this function are passed
to the provided async function.
.. versionadded:: 1.11
.. note::
This method can be called only when alembic is called using
an async dialect.
"""
if not sqla_compat.sqla_14_18:
raise NotImplementedError("SQLAlchemy 1.4.18+ required")
sync_conn = self.get_bind()
if sync_conn is None:
raise NotImplementedError("Cannot call run_async in SQL mode")
if not sync_conn.dialect.is_async:
raise ValueError("Cannot call run_async with a sync engine")
from sqlalchemy.ext.asyncio import AsyncConnection
from sqlalchemy.util import await_only
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
return await_only(async_function(async_conn, *args, **kw_args))
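# Usage sketch for run_async() above (illustrative; assumes an async dialect
# and hypothetical names):
#
#   from sqlalchemy import text
#
#   async def _seed(conn, label):
#       await conn.execute(
#           text("INSERT INTO t (label) VALUES (:label)"), {"label": label}
#       )
#
#   def upgrade():
#       op.run_async(_seed, "initial")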
class Operations(AbstractOperations):
"""Define high level migration operations.
Each operation corresponds to some schema migration operation,
executed against a particular :class:`.MigrationContext`
which in turn represents connectivity to a database,
or a file output stream.
While :class:`.Operations` is normally configured as
part of the :meth:`.EnvironmentContext.run_migrations`
method called from an ``env.py`` script, a standalone
:class:`.Operations` instance can be
made for use cases external to regular Alembic
migrations by passing in a :class:`.MigrationContext`::
from alembic.migration import MigrationContext
from alembic.operations import Operations
conn = myengine.connect()
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
op.alter_column("t", "c", nullable=True)
Note that as of 0.8, most of the methods on this class are produced
dynamically using the :meth:`.Operations.register_operation`
method.
"""
if TYPE_CHECKING:
# START STUB FUNCTIONS: op_cls
# ### the following stubs are generated by tools/write_pyi.py ###
# ### do not edit ###
def add_column(
self,
table_name: str,
column: Column[Any],
*,
schema: Optional[str] = None,
) -> None:
"""Issue an "add column" instruction using the current
migration context.
e.g.::
from alembic import op
from sqlalchemy import Column, String
op.add_column("organization", Column("name", String()))
The :meth:`.Operations.add_column` method typically corresponds
to the SQL command "ALTER TABLE... ADD COLUMN". Within the scope
of this command, the column's name, datatype, nullability,
and optional server-generated defaults may be indicated.
.. note::
With the exception of NOT NULL constraints or single-column FOREIGN
KEY constraints, other kinds of constraints such as PRIMARY KEY,
UNIQUE or CHECK constraints **cannot** be generated using this
method; for these constraints, refer to operations such as
:meth:`.Operations.create_primary_key` and
:meth:`.Operations.create_check_constraint`. In particular, the
following :class:`~sqlalchemy.schema.Column` parameters are
**ignored**:
* :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
typically do not support an ALTER operation that can add
individual columns one at a time to an existing primary key
constraint, therefore it's less ambiguous to use the
:meth:`.Operations.create_primary_key` method, which assumes no
existing primary key constraint is present.
* :paramref:`~sqlalchemy.schema.Column.unique` - use the
:meth:`.Operations.create_unique_constraint` method
* :paramref:`~sqlalchemy.schema.Column.index` - use the
:meth:`.Operations.create_index` method
The provided :class:`~sqlalchemy.schema.Column` object may include a
:class:`~sqlalchemy.schema.ForeignKey` constraint directive,
referencing a remote table name. For this specific type of constraint,
Alembic will automatically emit a second ALTER statement in order to
add the single-column FOREIGN KEY constraint separately::
from alembic import op
from sqlalchemy import Column, INTEGER, ForeignKey
op.add_column(
"organization",
Column("account_id", INTEGER, ForeignKey("accounts.id")),
)
The column argument passed to :meth:`.Operations.add_column` is a
:class:`~sqlalchemy.schema.Column` construct, used in the same way it's
used in SQLAlchemy. In particular, values or functions to be indicated
as producing the column's default value on the database side are
specified using the ``server_default`` parameter, and not ``default``
which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the column add
op.add_column(
"account",
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
:param table_name: String name of the parent table.
:param column: a :class:`sqlalchemy.schema.Column` object
representing the new column.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
""" # noqa: E501
...
def alter_column(
self,
table_name: str,
column_name: str,
*,
nullable: Optional[bool] = None,
comment: Union[str, Literal[False], None] = False,
server_default: Union[
str, bool, Identity, Computed, TextClause
] = False,
new_column_name: Optional[str] = None,
type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
existing_type: Union[
TypeEngine[Any], Type[TypeEngine[Any]], None
] = None,
existing_server_default: Union[
str, bool, Identity, Computed, TextClause, None
] = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
**kw: Any,
) -> None:
r"""Issue an "alter column" instruction using the
current migration context.
Generally, only that aspect of the column which
is being changed, i.e. name, type, nullability,
default, needs to be specified. Multiple changes
can also be specified at once and the backend should
"do the right thing", emitting each change either
separately or together as the backend allows.
MySQL has special requirements here, since MySQL
cannot ALTER a column without a full specification.
When producing MySQL-compatible migration files,
it is recommended that the ``existing_type``,
``existing_server_default``, and ``existing_nullable``
parameters be present, if not being altered.
Type changes which are against the SQLAlchemy
"schema" types :class:`~sqlalchemy.types.Boolean`
and :class:`~sqlalchemy.types.Enum` may also
add or drop constraints which accompany those
types on backends that don't support them natively.
The ``existing_type`` argument is
used in this case to identify and remove a previous
constraint that was bound to the type object.
:param table_name: string name of the target table.
:param column_name: string name of the target column,
as it exists before the operation begins.
:param nullable: Optional; specify ``True`` or ``False``
to alter the column's nullability.
:param server_default: Optional; specify a string
SQL expression, :func:`~sqlalchemy.sql.expression.text`,
or :class:`~sqlalchemy.schema.DefaultClause` to indicate
an alteration to the column's default value.
Set to ``None`` to have the default removed.
:param comment: optional string text of a new comment to add to the
column.
:param new_column_name: Optional; specify a string name here to
indicate the new name within a column rename operation.
:param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
type object to specify a change to the column's type.
For SQLAlchemy types that also indicate a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
the constraint is also generated.
:param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
currently understood by the MySQL dialect.
:param existing_type: Optional; a
:class:`~sqlalchemy.types.TypeEngine`
type object to specify the previous type. This
is required for all MySQL column alter operations that
don't otherwise specify a new type, as well as for
when nullability is being changed on a SQL Server
column. It is also used if the type is a so-called
SQLAlchemy "schema" type which may define a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`,
:class:`~sqlalchemy.types.Enum`),
so that the constraint can be dropped.
:param existing_server_default: Optional; The existing
default value of the column. Required on MySQL if
an existing default is not being changed; else MySQL
removes the default.
:param existing_nullable: Optional; the existing nullability
of the column. Required on MySQL if the existing nullability
is not being changed; else MySQL sets this to NULL.
:param existing_autoincrement: Optional; the existing autoincrement
of the column. Used for MySQL's system of altering a column
that specifies ``AUTO_INCREMENT``.
:param existing_comment: string text of the existing comment on the
column to be maintained. Required on MySQL if the existing comment
on the column is not being changed.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param postgresql_using: String argument which will indicate a
SQL expression to render within the Postgresql-specific USING clause
within ALTER COLUMN. This string is taken directly as raw SQL which
must explicitly include any necessary quoting or escaping of tokens
within the expression.
""" # noqa: E501
...
def bulk_insert(
self,
table: Union[Table, TableClause],
rows: List[Dict[str, Any]],
*,
multiinsert: bool = True,
) -> None:
"""Issue a "bulk insert" operation using the current
migration context.
This provides a means of representing an INSERT of multiple rows
which works equally well in the context of executing on a live
connection as well as that of generating a SQL script. In the
case of a SQL script, the values are rendered inline into the
statement.
e.g.::
from alembic import op
from datetime import date
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Date
# Create an ad-hoc table to use for the insert statement.
accounts_table = table(
"account",
column("id", Integer),
column("name", String),
column("create_date", Date),
)
op.bulk_insert(
accounts_table,
[
{
"id": 1,
"name": "John Smith",
"create_date": date(2010, 10, 5),
},
{
"id": 2,
"name": "Ed Williams",
"create_date": date(2007, 5, 27),
},
{
"id": 3,
"name": "Wendy Jones",
"create_date": date(2008, 8, 15),
},
],
)
When using --sql mode, some datatypes may not render inline
automatically, such as dates and other special types. When this
issue is present, :meth:`.Operations.inline_literal` may be used::
op.bulk_insert(
accounts_table,
[
{
"id": 1,
"name": "John Smith",
"create_date": op.inline_literal("2010-10-05"),
},
{
"id": 2,
"name": "Ed Williams",
"create_date": op.inline_literal("2007-05-27"),
},
{
"id": 3,
"name": "Wendy Jones",
"create_date": op.inline_literal("2008-08-15"),
},
],
multiinsert=False,
)
When using :meth:`.Operations.inline_literal` in conjunction with
:meth:`.Operations.bulk_insert`, in order for the statement to work
in "online" (e.g. non --sql) mode, the
:paramref:`~.Operations.bulk_insert.multiinsert`
flag should be set to ``False``, which will have the effect of
individual INSERT statements being emitted to the database, each
with a distinct VALUES clause, so that the "inline" values can
still be rendered, rather than attempting to pass the values
as bound parameters.
:param table: a table object which represents the target of the INSERT.
:param rows: a list of dictionaries indicating rows.
:param multiinsert: when at its default of True and --sql mode is not
enabled, the INSERT statement will be executed using
"executemany()" style, where all elements in the list of
dictionaries are passed as bound parameters in a single
list. Setting this to False results in individual INSERT
statements being emitted per parameter set, and is needed
in those cases where non-literal values are present in the
parameter sets.
""" # noqa: E501
...
def create_check_constraint(
self,
constraint_name: Optional[str],
table_name: str,
condition: Union[str, ColumnElement[bool], TextClause],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue a "create check constraint" instruction using the
current migration context.
e.g.::
from alembic import op
from sqlalchemy.sql import column, func
op.create_check_constraint(
"ck_user_name_len",
"user",
func.len(column("name")) > 5,
)
CHECK constraints are usually against a SQL expression, so ad-hoc
table metadata is usually needed. The function will convert the given
arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
to an anonymous table in order to emit the CREATE statement.
:param name: Name of the check constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param condition: SQL expression that's the condition of the
constraint. Can be a string or SQLAlchemy expression language
structure.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
""" # noqa: E501
...
def create_exclude_constraint(
self,
constraint_name: str,
table_name: str,
*elements: Any,
**kw: Any,
) -> Optional[Table]:
"""Issue an alter to create an EXCLUDE constraint using the
current migration context.
.. note:: This method is Postgresql specific, and additionally
requires at least SQLAlchemy 1.0.
e.g.::
from alembic import op
op.create_exclude_constraint(
"user_excl",
"user",
("period", "&&"),
("group", "="),
where=("group != 'some group'"),
)
Note that the expressions work the same way as those of
the ``ExcludeConstraint`` object itself; if plain strings are
passed, quoting rules must be applied manually.
:param name: Name of the constraint.
:param table_name: String name of the source table.
:param elements: exclude conditions.
:param where: SQL expression or SQL string with optional WHERE
clause.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within.
""" # noqa: E501
...
def create_foreign_key(
self,
constraint_name: Optional[str],
source_table: str,
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
*,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
source_schema: Optional[str] = None,
referent_schema: Optional[str] = None,
**dialect_kw: Any,
) -> None:
"""Issue a "create foreign key" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_foreign_key(
"fk_user_address",
"address",
"user",
["user_id"],
["id"],
)
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.ForeignKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the foreign key constraint. The name
is necessary so that an ALTER statement can be emitted. For setups
that use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source_table: String name of the source table.
:param referent_table: String name of the destination table.
:param local_cols: a list of string column names in the
source table.
:param remote_cols: a list of string column names in the
remote table.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param source_schema: Optional schema name of the source table.
:param referent_schema: Optional schema name of the destination table.
""" # noqa: E501
...
def create_index(
self,
index_name: Optional[str],
table_name: str,
columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
*,
schema: Optional[str] = None,
unique: bool = False,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "create index" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_index("ik_test", "t1", ["foo", "bar"])
Functional indexes can be produced by using the
:func:`sqlalchemy.sql.expression.text` construct::
from alembic import op
from sqlalchemy import text
op.create_index("ik_test", "t1", [text("lower(foo)")])
:param index_name: name of the index.
:param table_name: name of the owning table.
:param columns: a list consisting of string column names and/or
:func:`~sqlalchemy.sql.expression.text` constructs.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param unique: If True, create a unique index.
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param if_not_exists: If True, adds IF NOT EXISTS operator when
creating the new index.
.. versionadded:: 1.12.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
""" # noqa: E501
...
def create_primary_key(
self,
constraint_name: Optional[str],
table_name: str,
columns: List[str],
*,
schema: Optional[str] = None,
) -> None:
"""Issue a "create primary key" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.PrimaryKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the primary key constraint. The name
is necessary so that an ALTER statement can be emitted. For setups
that use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the target table.
:param columns: a list of string column names to be applied to the
primary key constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
""" # noqa: E501
...
def create_table(
self,
table_name: str,
*columns: SchemaItem,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> Table:
r"""Issue a "create table" instruction using the current migration
context.
This directive receives an argument list similar to that of the
traditional :class:`sqlalchemy.schema.Table` construct, but without the
metadata::
from sqlalchemy import INTEGER, TIMESTAMP, VARCHAR, NVARCHAR, Column, func
from alembic import op
op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("name", VARCHAR(50), nullable=False),
Column("description", NVARCHAR(200)),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
Note that :meth:`.create_table` accepts
:class:`~sqlalchemy.schema.Column`
constructs directly from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, INTEGER, TIMESTAMP, func
# specify "DEFAULT NOW" along with the "timestamp" column
op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
The function also returns a newly created
:class:`~sqlalchemy.schema.Table` object, corresponding to the table
specification given, which is suitable for
immediate SQL operations, in particular
:meth:`.Operations.bulk_insert`::
from sqlalchemy import INTEGER, TIMESTAMP, VARCHAR, NVARCHAR, Column, func
from alembic import op
account_table = op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("name", VARCHAR(50), nullable=False),
Column("description", NVARCHAR(200)),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
op.bulk_insert(
account_table,
[
{"name": "A1", "description": "account 1"},
{"name": "A2", "description": "account 2"},
],
)
:param table_name: Name of the table
:param \*columns: collection of :class:`~sqlalchemy.schema.Column`
objects within the table, as well as optional
:class:`~sqlalchemy.schema.Constraint` objects and
:class:`~sqlalchemy.schema.Index` objects.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_not_exists: If True, adds IF NOT EXISTS operator when
creating the new table.
.. versionadded:: 1.13.3
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
:return: the :class:`~sqlalchemy.schema.Table` object corresponding
to the parameters given.
""" # noqa: E501
...
def create_table_comment(
self,
table_name: str,
comment: Optional[str],
*,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
"""Emit a COMMENT ON operation to set the comment for a table.
:param table_name: string name of the target table.
:param comment: string value of the comment being registered against
the specified table.
:param existing_comment: String value of a comment
already registered on the specified table, used within autogenerate
so that the operation is reversible, but not required for direct
use.
.. seealso::
:meth:`.Operations.drop_table_comment`
:paramref:`.Operations.alter_column.comment`
""" # noqa: E501
...
def create_unique_constraint(
self,
constraint_name: Optional[str],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> Any:
"""Issue a "create unique constraint" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_unique_constraint("uq_user_name", "user", ["name"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.UniqueConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the unique constraint. The name is
necessary so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param columns: a list of string column names in the
source table.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
""" # noqa: E501
...
def drop_column(
self,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue a "drop column" instruction using the current
migration context.
e.g.::
drop_column("organization", "account_id")
:param table_name: name of table
:param column_name: name of column
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param mssql_drop_check: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the CHECK constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.check_constraints,
then exec's a separate DROP CONSTRAINT for that constraint.
:param mssql_drop_default: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the DEFAULT constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.default_constraints,
then exec's a separate DROP CONSTRAINT for that default.
:param mssql_drop_foreign_key: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop a single FOREIGN KEY constraint on the column using a
SQL-script-compatible
block that selects into a @variable from
sys.foreign_keys/sys.foreign_key_columns,
then exec's a separate DROP CONSTRAINT for that foreign key. This
currently only works if the column has exactly one FK constraint
which refers to it.
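For example, a sketch of the SQL Server-specific form (table and
column names are illustrative)::
from alembic import op
op.drop_column("account", "created_at", mssql_drop_default=True)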
""" # noqa: E501
...
def drop_constraint(
self,
constraint_name: str,
table_name: str,
type_: Optional[str] = None,
*,
schema: Optional[str] = None,
) -> None:
r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
:param constraint_name: name of the constraint.
:param table_name: table name.
:param type\_: optional, required on MySQL. Can be
'foreignkey', 'primary', 'unique', or 'check'.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
""" # noqa: E501
...
def drop_index(
self,
index_name: str,
table_name: Optional[str] = None,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "drop index" instruction using the current
migration context.
e.g.::
drop_index("accounts")
:param index_name: name of the index.
:param table_name: name of the owning table. Some
backends such as Microsoft SQL Server require this.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_exists: If True, adds IF EXISTS operator when
dropping the index.
.. versionadded:: 1.12.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
""" # noqa: E501
...
def drop_table(
self,
table_name: str,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "drop table" instruction using the current
migration context.
e.g.::
drop_table("accounts")
:param table_name: Name of the table
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_exists: If True, adds IF EXISTS operator when
dropping the table.
.. versionadded:: 1.13.3
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
""" # noqa: E501
...
def drop_table_comment(
self,
table_name: str,
*,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
"""Issue a "drop table comment" operation to
remove an existing comment set on a table.
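e.g. (table name and comment text are illustrative)::
from alembic import op
op.drop_table_comment(
"account",
existing_comment="This table stores account records",
)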
:param table_name: string name of the target table.
:param existing_comment: An optional string value of a comment already
registered on the specified table.
.. seealso::
:meth:`.Operations.create_table_comment`
:paramref:`.Operations.alter_column.comment`
""" # noqa: E501
...
def execute(
self,
sqltext: Union[Executable, str],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
r"""Execute the given SQL using the current migration context.
The given SQL can be a plain string, e.g.::
op.execute("INSERT INTO table (foo) VALUES ('some value')")
Or it can be any kind of Core SQL Expression construct, such as
below where we use an update construct::
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
account = table("account", column("name", String))
op.execute(
account.update()
.where(account.c.name == op.inline_literal("account 1"))
.values({"name": op.inline_literal("account 2")})
)
Above, we made use of the SQLAlchemy
:func:`sqlalchemy.sql.expression.table` and
:func:`sqlalchemy.sql.expression.column` constructs to make a brief,
ad-hoc table construct just for our UPDATE statement. A full
:class:`~sqlalchemy.schema.Table` construct of course works perfectly
fine as well, though note it's a recommended practice to at least
ensure the definition of a table is self-contained within the migration
script, rather than imported from a module that may break compatibility
with older migrations.
In a SQL script context, the statement is emitted directly to the
output stream. There is *no* return result, however, as this
function is oriented towards generating a change script
that can run in "offline" mode. Additionally, parameterized
statements are discouraged here, as they *will not work* in offline
mode. Above, we use :meth:`.inline_literal` where parameters are
to be used.
For full interaction with a connected database where parameters can
also be used normally, use the "bind" available from the context::
from alembic import op
connection = op.get_bind()
connection.execute(
account.update()
.where(account.c.name == "account 1")
.values({"name": "account 2"})
)
Additionally, when passing the statement as a plain string, it is first
coerced into a :func:`sqlalchemy.sql.expression.text` construct
before being passed along. In the less likely case that the
literal SQL string contains a colon, it must be escaped with a
backslash, as::
op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
:param sqltext: Any legal SQLAlchemy expression, including:
* a string
* a :func:`sqlalchemy.sql.expression.text` construct.
* a :func:`sqlalchemy.sql.expression.insert` construct.
* a :func:`sqlalchemy.sql.expression.update` construct.
* a :func:`sqlalchemy.sql.expression.delete` construct.
* Any "executable" described in SQLAlchemy Core documentation,
noting that no result set is returned.
.. note:: when passing a plain string, the statement is coerced into
a :func:`sqlalchemy.sql.expression.text` construct. This construct
considers symbols with colons, e.g. ``:foo``, to be bound parameters.
To avoid this, ensure that colon symbols are escaped, e.g.
``\:foo``.
:param execution_options: Optional dictionary of
execution options, will be passed to
:meth:`sqlalchemy.engine.Connection.execution_options`.
""" # noqa: E501
...
def rename_table(
self,
old_table_name: str,
new_table_name: str,
*,
schema: Optional[str] = None,
) -> None:
"""Emit an ALTER TABLE to rename a table.
:param old_table_name: old name.
:param new_table_name: new name.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
""" # noqa: E501
...
# END STUB FUNCTIONS: op_cls
class BatchOperations(AbstractOperations):
"""Modifies the interface :class:`.Operations` for batch mode.
This basically omits the ``table_name`` and ``schema`` parameters
from associated methods, as these are a given when running under batch
mode.
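A typical invocation, obtained via
:meth:`.Operations.batch_alter_table` (table and column names are
illustrative)::
from alembic import op
from sqlalchemy import Column, String
with op.batch_alter_table("user") as batch_op:
batch_op.add_column(Column("nickname", String(50)))
batch_op.drop_column("temp_data")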
.. seealso::
:meth:`.Operations.batch_alter_table`
Note that as of 0.8, most of the methods on this class are produced
dynamically using the :meth:`.Operations.register_operation`
method.
"""
impl: BatchOperationsImpl
def _noop(self, operation: Any) -> NoReturn:
raise NotImplementedError(
"The %s method does not apply to a batch table alter operation."
% operation
)
if TYPE_CHECKING:
# START STUB FUNCTIONS: batch_op
# ### the following stubs are generated by tools/write_pyi.py ###
# ### do not edit ###
def add_column(
self,
column: Column[Any],
*,
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
) -> None:
"""Issue an "add column" instruction using the current
batch migration context.
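e.g., a sketch using the batch-only positioning option (names are
illustrative; ``insert_after`` requires the table to be recreated,
hence ``recreate="always"``)::
from alembic import op
from sqlalchemy import Column, DateTime
with op.batch_alter_table("account", recreate="always") as batch_op:
batch_op.add_column(
Column("last_login", DateTime()),
insert_after="created_at",
)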
.. seealso::
:meth:`.Operations.add_column`
""" # noqa: E501
...
def alter_column(
self,
column_name: str,
*,
nullable: Optional[bool] = None,
comment: Union[str, Literal[False], None] = False,
server_default: Any = False,
new_column_name: Optional[str] = None,
type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
existing_type: Union[
TypeEngine[Any], Type[TypeEngine[Any]], None
] = None,
existing_server_default: Union[
str, bool, Identity, Computed, None
] = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue an "alter column" instruction using the current
batch migration context.
Parameters are the same as that of :meth:`.Operations.alter_column`,
as well as the following option(s):
:param insert_before: String name of an existing column which this
column should be placed before, when creating the new table.
:param insert_after: String name of an existing column which this
column should be placed after, when creating the new table. If
both :paramref:`.BatchOperations.alter_column.insert_before`
and :paramref:`.BatchOperations.alter_column.insert_after` are
omitted, the column is inserted after the last existing column
in the table.
.. seealso::
:meth:`.Operations.alter_column`
""" # noqa: E501
...
def create_check_constraint(
self,
constraint_name: str,
condition: Union[str, ColumnElement[bool], TextClause],
**kw: Any,
) -> None:
"""Issue a "create check constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_check_constraint`
""" # noqa: E501
...
def create_exclude_constraint(
self, constraint_name: str, *elements: Any, **kw: Any
) -> Optional[Table]:
"""Issue a "create exclude constraint" instruction using the
current batch migration context.
.. note:: This method is PostgreSQL specific, and additionally
requires at least SQLAlchemy 1.0.
.. seealso::
:meth:`.Operations.create_exclude_constraint`
""" # noqa: E501
...
def create_foreign_key(
self,
constraint_name: Optional[str],
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
*,
referent_schema: Optional[str] = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
**dialect_kw: Any,
) -> None:
"""Issue a "create foreign key" instruction using the
current batch migration context.
The batch form of this call omits the ``source_table`` and
``source_schema`` arguments from the call.
e.g.::
with batch_alter_table("address") as batch_op:
batch_op.create_foreign_key(
"fk_user_address",
"user",
["user_id"],
["id"],
)
.. seealso::
:meth:`.Operations.create_foreign_key`
""" # noqa: E501
...
def create_index(
self, index_name: str, columns: List[str], **kw: Any
) -> None:
"""Issue a "create index" instruction using the
current batch migration context.
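e.g. (index, table and column names are illustrative)::
from alembic import op
with op.batch_alter_table("user") as batch_op:
batch_op.create_index("ix_user_email", ["email"])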
.. seealso::
:meth:`.Operations.create_index`
""" # noqa: E501
...
def create_primary_key(
self, constraint_name: Optional[str], columns: List[str]
) -> None:
"""Issue a "create primary key" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_primary_key`
""" # noqa: E501
...
def create_table_comment(
self,
comment: Optional[str],
*,
existing_comment: Optional[str] = None,
) -> None:
"""Emit a COMMENT ON operation to set the comment for a table
using the current batch migration context.
:param comment: string value of the comment being registered against
the specified table.
:param existing_comment: String value of a comment
already registered on the specified table, used within autogenerate
so that the operation is reversible, but not required for direct
use.
""" # noqa: E501
...
def create_unique_constraint(
self, constraint_name: str, columns: Sequence[str], **kw: Any
) -> Any:
"""Issue a "create unique constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_unique_constraint`
""" # noqa: E501
...
def drop_column(self, column_name: str, **kw: Any) -> None:
"""Issue a "drop column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.drop_column`
""" # noqa: E501
...
def drop_constraint(
self, constraint_name: str, type_: Optional[str] = None
) -> None:
"""Issue a "drop constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.drop_constraint`
""" # noqa: E501
...
def drop_index(self, index_name: str, **kw: Any) -> None:
"""Issue a "drop index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.drop_index`
""" # noqa: E501
...
def drop_table_comment(
self, *, existing_comment: Optional[str] = None
) -> None:
"""Issue a "drop table comment" operation to
remove an existing comment set on a table using the current
batch operations context.
:param existing_comment: An optional string value of a comment already
registered on the specified table.
""" # noqa: E501
...
def execute(
self,
sqltext: Union[Executable, str],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
"""Execute the given SQL using the current migration context.
.. seealso::
:meth:`.Operations.execute`
""" # noqa: E501
...
# END STUB FUNCTIONS: batch_op
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import Index
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema as sql_schema
from sqlalchemy import select
from sqlalchemy import Table
from sqlalchemy import types as sqltypes
from sqlalchemy.sql.schema import SchemaEventTarget
from sqlalchemy.util import OrderedDict
from sqlalchemy.util import topological
from ..util import exc
from ..util.sqla_compat import _columns_for_constraint
from ..util.sqla_compat import _copy
from ..util.sqla_compat import _copy_expression
from ..util.sqla_compat import _ensure_scope_for_ddl
from ..util.sqla_compat import _fk_is_self_referential
from ..util.sqla_compat import _idx_table_bound_expressions
from ..util.sqla_compat import _is_type_bound
from ..util.sqla_compat import _remove_column_from_collection
from ..util.sqla_compat import _resolve_for_variant
from ..util.sqla_compat import constraint_name_defined
from ..util.sqla_compat import constraint_name_string
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.engine import Dialect
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.functions import Function
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.type_api import TypeEngine
from ..ddl.impl import DefaultImpl
class BatchOperationsImpl:
def __init__(
self,
operations,
table_name,
schema,
recreate,
copy_from,
table_args,
table_kwargs,
reflect_args,
reflect_kwargs,
naming_convention,
partial_reordering,
):
self.operations = operations
self.table_name = table_name
self.schema = schema
if recreate not in ("auto", "always", "never"):
raise ValueError(
"recreate may be one of 'auto', 'always', or 'never'."
)
self.recreate = recreate
self.copy_from = copy_from
self.table_args = table_args
self.table_kwargs = dict(table_kwargs)
self.reflect_args = reflect_args
self.reflect_kwargs = dict(reflect_kwargs)
self.reflect_kwargs.setdefault(
"listeners", list(self.reflect_kwargs.get("listeners", ()))
)
self.reflect_kwargs["listeners"].append(
("column_reflect", operations.impl.autogen_column_reflect)
)
self.naming_convention = naming_convention
self.partial_reordering = partial_reordering
self.batch = []
@property
def dialect(self) -> Dialect:
return self.operations.impl.dialect
@property
def impl(self) -> DefaultImpl:
return self.operations.impl
def _should_recreate(self) -> bool:
if self.recreate == "auto":
return self.operations.impl.requires_recreate_in_batch(self)
elif self.recreate == "always":
return True
else:
return False
def flush(self) -> None:
should_recreate = self._should_recreate()
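# Two execution paths: when no recreate is required, replay the
# batched operations directly as individual ALTER statements;
# otherwise perform "move and copy": build a temp table, copy
# rows across, drop the original, then rename the temp table back.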
with _ensure_scope_for_ddl(self.impl.connection):
if not should_recreate:
for opname, arg, kw in self.batch:
fn = getattr(self.operations.impl, opname)
fn(*arg, **kw)
else:
if self.naming_convention:
m1 = MetaData(naming_convention=self.naming_convention)
else:
m1 = MetaData()
if self.copy_from is not None:
existing_table = self.copy_from
reflected = False
else:
if self.operations.migration_context.as_sql:
raise exc.CommandError(
f"This operation cannot proceed in --sql mode; "
f"batch mode with dialect "
f"{self.operations.migration_context.dialect.name} " # noqa: E501
f"requires a live database connection with which "
f'to reflect the table "{self.table_name}". '
f"To generate a batch SQL migration script using "
"table "
'"move and copy", a complete Table object '
f'should be passed to the "copy_from" argument '
"of the batch_alter_table() method so that table "
"reflection can be skipped."
)
existing_table = Table(
self.table_name,
m1,
schema=self.schema,
autoload_with=self.operations.get_bind(),
*self.reflect_args,
**self.reflect_kwargs,
)
reflected = True
batch_impl = ApplyBatchImpl(
self.impl,
existing_table,
self.table_args,
self.table_kwargs,
reflected,
partial_reordering=self.partial_reordering,
)
for opname, arg, kw in self.batch:
fn = getattr(batch_impl, opname)
fn(*arg, **kw)
batch_impl._create(self.impl)
def alter_column(self, *arg, **kw) -> None:
self.batch.append(("alter_column", arg, kw))
def add_column(self, *arg, **kw) -> None:
if (
"insert_before" in kw or "insert_after" in kw
) and not self._should_recreate():
raise exc.CommandError(
"Can't specify insert_before or insert_after when using "
"ALTER; please specify recreate='always'"
)
self.batch.append(("add_column", arg, kw))
def drop_column(self, *arg, **kw) -> None:
self.batch.append(("drop_column", arg, kw))
def add_constraint(self, const: Constraint) -> None:
self.batch.append(("add_constraint", (const,), {}))
def drop_constraint(self, const: Constraint) -> None:
self.batch.append(("drop_constraint", (const,), {}))
def rename_table(self, *arg, **kw):
self.batch.append(("rename_table", arg, kw))
def create_index(self, idx: Index, **kw: Any) -> None:
self.batch.append(("create_index", (idx,), kw))
def drop_index(self, idx: Index, **kw: Any) -> None:
self.batch.append(("drop_index", (idx,), kw))
def create_table_comment(self, table):
self.batch.append(("create_table_comment", (table,), {}))
def drop_table_comment(self, table):
self.batch.append(("drop_table_comment", (table,), {}))
def create_table(self, table):
raise NotImplementedError("Can't create table in batch mode")
def drop_table(self, table):
raise NotImplementedError("Can't drop table in batch mode")
def create_column_comment(self, column):
self.batch.append(("create_column_comment", (column,), {}))
class ApplyBatchImpl:
def __init__(
self,
impl: DefaultImpl,
table: Table,
table_args: tuple,
table_kwargs: Dict[str, Any],
reflected: bool,
partial_reordering: tuple = (),
) -> None:
self.impl = impl
self.table = table # this is a Table object
self.table_args = table_args
self.table_kwargs = table_kwargs
self.temp_table_name = self._calc_temp_name(table.name)
self.new_table: Optional[Table] = None
self.partial_reordering = partial_reordering # tuple of tuples
self.add_col_ordering: Tuple[
Tuple[str, str], ...
] = () # tuple of tuples
self.column_transfers = OrderedDict(
(c.name, {"expr": c}) for c in self.table.c
)
self.existing_ordering = list(self.column_transfers)
self.reflected = reflected
self._grab_table_elements()
@classmethod
def _calc_temp_name(cls, tablename: Union[quoted_name, str]) -> str:
return ("_alembic_tmp_%s" % tablename)[0:50]
def _grab_table_elements(self) -> None:
schema = self.table.schema
self.columns: Dict[str, Column[Any]] = OrderedDict()
for c in self.table.c:
c_copy = _copy(c, schema=schema)
c_copy.unique = c_copy.index = False
# ensure that the type object was copied,
# as we may need to modify it in-place
if isinstance(c.type, SchemaEventTarget):
assert c_copy.type is not c.type
self.columns[c.name] = c_copy
self.named_constraints: Dict[str, Constraint] = {}
self.unnamed_constraints = []
self.col_named_constraints = {}
self.indexes: Dict[str, Index] = {}
self.new_indexes: Dict[str, Index] = {}
for const in self.table.constraints:
if _is_type_bound(const):
continue
elif (
self.reflected
and isinstance(const, CheckConstraint)
and not const.name
):
# TODO: we are skipping unnamed reflected CheckConstraint
# because
# we have no way to determine _is_type_bound() for these.
pass
elif constraint_name_string(const.name):
self.named_constraints[const.name] = const
else:
self.unnamed_constraints.append(const)
if not self.reflected:
for col in self.table.c:
for const in col.constraints:
if const.name:
self.col_named_constraints[const.name] = (col, const)
for idx in self.table.indexes:
self.indexes[idx.name] = idx # type: ignore[index]
for k in self.table.kwargs:
self.table_kwargs.setdefault(k, self.table.kwargs[k])
def _adjust_self_columns_for_partial_reordering(self) -> None:
pairs = set()
col_by_idx = list(self.columns)
if self.partial_reordering:
for tuple_ in self.partial_reordering:
for index, elem in enumerate(tuple_):
if index > 0:
pairs.add((tuple_[index - 1], elem))
else:
for index, elem in enumerate(self.existing_ordering):
if index > 0:
pairs.add((col_by_idx[index - 1], elem))
pairs.update(self.add_col_ordering)
# this can happen if some columns were dropped and not removed
# from existing_ordering. this should be prevented already, but
# conservatively making sure this didn't happen
pairs_list = [p for p in pairs if p[0] != p[1]]
sorted_ = list(
topological.sort(pairs_list, col_by_idx, deterministic_order=True)
)
self.columns = OrderedDict((k, self.columns[k]) for k in sorted_)
self.column_transfers = OrderedDict(
(k, self.column_transfers[k]) for k in sorted_
)
def _transfer_elements_to_new_table(self) -> None:
assert self.new_table is None, "Can only create new table once"
m = MetaData()
schema = self.table.schema
if self.partial_reordering or self.add_col_ordering:
self._adjust_self_columns_for_partial_reordering()
self.new_table = new_table = Table(
self.temp_table_name,
m,
*(list(self.columns.values()) + list(self.table_args)),
schema=schema,
**self.table_kwargs,
)
for const in (
list(self.named_constraints.values()) + self.unnamed_constraints
):
const_columns = {c.key for c in _columns_for_constraint(const)}
if not const_columns.issubset(self.column_transfers):
continue
const_copy: Constraint
if isinstance(const, ForeignKeyConstraint):
if _fk_is_self_referential(const):
# for self-referential constraint, refer to the
# *original* table name, and not _alembic_batch_temp.
# This is consistent with how we're handling
# FK constraints from other tables; we assume SQLite, with
# foreign key enforcement off, just keeps the referenced names
# unchanged, so when we rename the temp table back, they match
# again.
const_copy = _copy(
const, schema=schema, target_table=self.table
)
else:
# "target_table" for ForeignKeyConstraint.copy() is
# only used if the FK is detected as being
# self-referential, which we are handling above.
const_copy = _copy(const, schema=schema)
else:
const_copy = _copy(
const, schema=schema, target_table=new_table
)
if isinstance(const, ForeignKeyConstraint):
self._setup_referent(m, const)
new_table.append_constraint(const_copy)
def _gather_indexes_from_both_tables(self) -> List[Index]:
assert self.new_table is not None
idx: List[Index] = []
for idx_existing in self.indexes.values():
# this is a lift-and-move from Table.to_metadata
if idx_existing._column_flag:
continue
idx_copy = Index(
idx_existing.name,
unique=idx_existing.unique,
*[
_copy_expression(expr, self.new_table)
for expr in _idx_table_bound_expressions(idx_existing)
],
_table=self.new_table,
**idx_existing.kwargs,
)
idx.append(idx_copy)
for index in self.new_indexes.values():
idx.append(
Index(
index.name,
unique=index.unique,
*[self.new_table.c[col] for col in index.columns.keys()],
**index.kwargs,
)
)
return idx
def _setup_referent(
self, metadata: MetaData, constraint: ForeignKeyConstraint
) -> None:
spec = constraint.elements[0]._get_colspec()
parts = spec.split(".")
tname = parts[-2]
if len(parts) == 3:
referent_schema = parts[0]
else:
referent_schema = None
if tname != self.temp_table_name:
key = sql_schema._get_table_key(tname, referent_schema)
def colspec(elem: Any):
return elem._get_colspec()
if key in metadata.tables:
t = metadata.tables[key]
for elem in constraint.elements:
colname = colspec(elem).split(".")[-1]
if colname not in t.c:
t.append_column(Column(colname, sqltypes.NULLTYPE))
else:
Table(
tname,
metadata,
*[
Column(n, sqltypes.NULLTYPE)
for n in [
colspec(elem).split(".")[-1]
for elem in constraint.elements
]
],
schema=referent_schema,
)
def _create(self, op_impl: DefaultImpl) -> None:
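# "move and copy" core: create the new table, INSERT..FROM SELECT
# the transferred columns, drop the old table, rename the temp
# table to the original name, then re-create indexes gathered from
# both the old and the new table.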
self._transfer_elements_to_new_table()
op_impl.prep_table_for_batch(self, self.table)
assert self.new_table is not None
op_impl.create_table(self.new_table)
try:
op_impl._exec(
self.new_table.insert()
.inline()
.from_select(
list(
k
for k, transfer in self.column_transfers.items()
if "expr" in transfer
),
select(
*[
transfer["expr"]
for transfer in self.column_transfers.values()
if "expr" in transfer
]
),
)
)
op_impl.drop_table(self.table)
except:
op_impl.drop_table(self.new_table)
raise
else:
op_impl.rename_table(
self.temp_table_name, self.table.name, schema=self.table.schema
)
self.new_table.name = self.table.name
try:
for idx in self._gather_indexes_from_both_tables():
op_impl.create_index(idx)
finally:
self.new_table.name = self.temp_table_name
def alter_column(
self,
table_name: str,
column_name: str,
nullable: Optional[bool] = None,
server_default: Optional[Union[Function[Any], str, bool]] = False,
name: Optional[str] = None,
type_: Optional[TypeEngine] = None,
autoincrement: Optional[Union[bool, Literal["auto"]]] = None,
comment: Union[str, Literal[False]] = False,
**kw,
) -> None:
existing = self.columns[column_name]
existing_transfer: Dict[str, Any] = self.column_transfers[column_name]
if name is not None and name != column_name:
# note that we don't change '.key' - we keep referring
# to the renamed column by its old key in _create(). neat!
existing.name = name
existing_transfer["name"] = name
existing_type = kw.get("existing_type", None)
if existing_type:
resolved_existing_type = _resolve_for_variant(
kw["existing_type"], self.impl.dialect
)
# pop named constraints for Boolean/Enum for rename
if (
isinstance(resolved_existing_type, SchemaEventTarget)
and resolved_existing_type.name # type:ignore[attr-defined] # noqa: E501
):
self.named_constraints.pop(
resolved_existing_type.name, # type:ignore[attr-defined] # noqa: E501
None,
)
if type_ is not None:
type_ = sqltypes.to_instance(type_)
# old type is being discarded so turn off eventing
# rules. Alternatively we can
# erase the events set up by this type, but this is simpler.
# we also ignore the drop_constraint that will come here from
# Operations.implementation_for(alter_column)
if isinstance(existing.type, SchemaEventTarget):
existing.type._create_events = ( # type:ignore[attr-defined]
existing.type.create_constraint # type:ignore[attr-defined] # noqa
) = False
self.impl.cast_for_batch_migrate(
existing, existing_transfer, type_
)
existing.type = type_
# we *dont* however set events for the new type, because
# alter_column is invoked from
# Operations.implementation_for(alter_column) which already
# will emit an add_constraint()
if nullable is not None:
existing.nullable = nullable
if server_default is not False:
if server_default is None:
existing.server_default = None
else:
sql_schema.DefaultClause(
server_default # type: ignore[arg-type]
)._set_parent(existing)
if autoincrement is not None:
existing.autoincrement = bool(autoincrement)
if comment is not False:
existing.comment = comment
def _setup_dependencies_for_add_column(
self,
colname: str,
insert_before: Optional[str],
insert_after: Optional[str],
) -> None:
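# Record the requested position as (predecessor, successor) pairs in
# add_col_ordering; these are merged with the existing column order
# and topologically sorted in
# _adjust_self_columns_for_partial_reordering().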
index_cols = self.existing_ordering
col_indexes = {name: i for i, name in enumerate(index_cols)}
if not self.partial_reordering:
if insert_after:
if not insert_before:
if insert_after in col_indexes:
# insert after an existing column
idx = col_indexes[insert_after] + 1
if idx < len(index_cols):
insert_before = index_cols[idx]
else:
# insert after a column that is also new
insert_before = dict(self.add_col_ordering)[
insert_after
]
if insert_before:
if not insert_after:
if insert_before in col_indexes:
# insert before an existing column
idx = col_indexes[insert_before] - 1
if idx >= 0:
insert_after = index_cols[idx]
else:
# insert before a column that is also new
insert_after = {
b: a for a, b in self.add_col_ordering
}[insert_before]
if insert_before:
self.add_col_ordering += ((colname, insert_before),)
if insert_after:
self.add_col_ordering += ((insert_after, colname),)
if (
not self.partial_reordering
and not insert_before
and not insert_after
and col_indexes
):
self.add_col_ordering += ((index_cols[-1], colname),)
def add_column(
self,
table_name: str,
column: Column[Any],
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
**kw,
) -> None:
self._setup_dependencies_for_add_column(
column.name, insert_before, insert_after
)
# we copy the column because operations.add_column()
# gives us a Column that is part of a Table already.
self.columns[column.name] = _copy(column, schema=self.table.schema)
self.column_transfers[column.name] = {}
def drop_column(
self,
table_name: str,
column: Union[ColumnClause[Any], Column[Any]],
**kw,
) -> None:
if column.name in self.table.primary_key.columns:
_remove_column_from_collection(
self.table.primary_key.columns, column
)
del self.columns[column.name]
del self.column_transfers[column.name]
self.existing_ordering.remove(column.name)
# pop named constraints for Boolean/Enum for rename
if (
"existing_type" in kw
and isinstance(kw["existing_type"], SchemaEventTarget)
and kw["existing_type"].name # type:ignore[attr-defined]
):
self.named_constraints.pop(
kw["existing_type"].name, None # type:ignore[attr-defined]
)
def create_column_comment(self, column):
"""the batch table creation function will issue create_column_comment
on the real "impl" as part of the create table process.
That is, the Column object will have the comment on it already,
so when it is received by add_column() it will be a normal part of
the CREATE TABLE and doesn't need an extra step here.
"""
def create_table_comment(self, table):
"""the batch table creation function will issue create_table_comment
on the real "impl" as part of the create table process.
"""
def drop_table_comment(self, table):
"""the batch table creation function will issue drop_table_comment
on the real "impl" as part of the create table process.
"""
def add_constraint(self, const: Constraint) -> None:
if not constraint_name_defined(const.name):
raise ValueError("Constraint must have a name")
if isinstance(const, sql_schema.PrimaryKeyConstraint):
if self.table.primary_key in self.unnamed_constraints:
self.unnamed_constraints.remove(self.table.primary_key)
if constraint_name_string(const.name):
self.named_constraints[const.name] = const
else:
self.unnamed_constraints.append(const)
def drop_constraint(self, const: Constraint) -> None:
if not const.name:
raise ValueError("Constraint must have a name")
try:
if const.name in self.col_named_constraints:
col, const = self.col_named_constraints.pop(const.name)
for col_const in list(self.columns[col.name].constraints):
if col_const.name == const.name:
self.columns[col.name].constraints.remove(col_const)
elif constraint_name_string(const.name):
const = self.named_constraints.pop(const.name)
elif const in self.unnamed_constraints:
self.unnamed_constraints.remove(const)
except KeyError:
if _is_type_bound(const):
# type-bound constraints are only included in the new
# table via their type object in any case, so ignore the
# drop_constraint() that comes here via the
# Operations.implementation_for(alter_column)
return
raise ValueError("No such constraint: '%s'" % const.name)
else:
if isinstance(const, PrimaryKeyConstraint):
for col in const.columns:
self.columns[col.name].primary_key = False
def create_index(self, idx: Index) -> None:
self.new_indexes[idx.name] = idx # type: ignore[index]
def drop_index(self, idx: Index) -> None:
try:
del self.indexes[idx.name] # type: ignore[arg-type]
except KeyError:
raise ValueError("No such index: '%s'" % idx.name)
def rename_table(self, *arg, **kw):
raise NotImplementedError("TODO")
|
from __future__ import annotations
from abc import abstractmethod
import re
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import FrozenSet
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy.types import NULLTYPE
from . import schemaobj
from .base import BatchOperations
from .base import Operations
from .. import util
from ..util import sqla_compat
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.sql import Executable
from sqlalchemy.sql.elements import ColumnElement
from sqlalchemy.sql.elements import conv
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Computed
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import Identity
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import PrimaryKeyConstraint
from sqlalchemy.sql.schema import SchemaItem
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.schema import UniqueConstraint
from sqlalchemy.sql.selectable import TableClause
from sqlalchemy.sql.type_api import TypeEngine
from ..autogenerate.rewriter import Rewriter
from ..runtime.migration import MigrationContext
from ..script.revision import _RevIdType
_T = TypeVar("_T", bound=Any)
_AC = TypeVar("_AC", bound="AddConstraintOp")
class MigrateOperation:
"""base class for migration command and organization objects.
This system is part of the operation extensibility API.
.. seealso::
:ref:`operation_objects`
:ref:`operation_plugins`
:ref:`customizing_revision`
"""
@util.memoized_property
def info(self) -> Dict[Any, Any]:
"""A dictionary that may be used to store arbitrary information
along with this :class:`.MigrateOperation` object.
"""
return {}
_mutations: FrozenSet[Rewriter] = frozenset()
def reverse(self) -> MigrateOperation:
raise NotImplementedError
def to_diff_tuple(self) -> Tuple[Any, ...]:
raise NotImplementedError
class AddConstraintOp(MigrateOperation):
"""Represent an add constraint operation."""
add_constraint_ops = util.Dispatcher()
@property
def constraint_type(self) -> str:
raise NotImplementedError()
@classmethod
def register_add_constraint(
cls, type_: str
) -> Callable[[Type[_AC]], Type[_AC]]:
def go(klass: Type[_AC]) -> Type[_AC]:
cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint)
return klass
return go
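# Registration sketch: the decorator maps a constraint type name to
# the decorated class's from_constraint(), so that
# AddConstraintOp.from_constraint() can dispatch on
# Constraint.__visit_name__; see e.g. CreatePrimaryKeyOp below,
# which registers itself for "primary_key_constraint".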
@classmethod
def from_constraint(cls, constraint: Constraint) -> AddConstraintOp:
return cls.add_constraint_ops.dispatch(constraint.__visit_name__)( # type: ignore[no-any-return] # noqa: E501
constraint
)
@abstractmethod
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> Constraint:
pass
def reverse(self) -> DropConstraintOp:
return DropConstraintOp.from_constraint(self.to_constraint())
def to_diff_tuple(self) -> Tuple[str, Constraint]:
return ("add_constraint", self.to_constraint())
@Operations.register_operation("drop_constraint")
@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
class DropConstraintOp(MigrateOperation):
"""Represent a drop constraint operation."""
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
type_: Optional[str] = None,
*,
schema: Optional[str] = None,
_reverse: Optional[AddConstraintOp] = None,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.constraint_type = type_
self.schema = schema
self._reverse = _reverse
def reverse(self) -> AddConstraintOp:
return AddConstraintOp.from_constraint(self.to_constraint())
def to_diff_tuple(
self,
) -> Tuple[str, SchemaItem]:
if self.constraint_type == "foreignkey":
return ("remove_fk", self.to_constraint())
else:
return ("remove_constraint", self.to_constraint())
@classmethod
def from_constraint(cls, constraint: Constraint) -> DropConstraintOp:
types = {
"unique_constraint": "unique",
"foreign_key_constraint": "foreignkey",
"primary_key_constraint": "primary",
"check_constraint": "check",
"column_check_constraint": "check",
"table_or_column_check_constraint": "check",
}
constraint_table = sqla_compat._table_for_constraint(constraint)
return cls(
sqla_compat.constraint_name_or_none(constraint.name),
constraint_table.name,
schema=constraint_table.schema,
type_=types.get(constraint.__visit_name__),
_reverse=AddConstraintOp.from_constraint(constraint),
)
def to_constraint(self) -> Constraint:
if self._reverse is not None:
constraint = self._reverse.to_constraint()
constraint.name = self.constraint_name
constraint_table = sqla_compat._table_for_constraint(constraint)
constraint_table.name = self.table_name
constraint_table.schema = self.schema
return constraint
else:
raise ValueError(
"constraint cannot be produced; "
"original constraint is not present"
)
@classmethod
def drop_constraint(
cls,
operations: Operations,
constraint_name: str,
table_name: str,
type_: Optional[str] = None,
*,
schema: Optional[str] = None,
) -> None:
r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
:param constraint_name: name of the constraint.
:param table_name: table name.
:param type\_: optional, required on MySQL. Can be
'foreignkey', 'primary', 'unique', or 'check'.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, type_=type_, schema=schema)
return operations.invoke(op)
@classmethod
def batch_drop_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
type_: Optional[str] = None,
) -> None:
"""Issue a "drop constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.drop_constraint`
"""
op = cls(
constraint_name,
operations.impl.table_name,
type_=type_,
schema=operations.impl.schema,
)
return operations.invoke(op)
@Operations.register_operation("create_primary_key")
@BatchOperations.register_operation(
"create_primary_key", "batch_create_primary_key"
)
@AddConstraintOp.register_add_constraint("primary_key_constraint")
class CreatePrimaryKeyOp(AddConstraintOp):
"""Represent a create primary key operation."""
constraint_type = "primarykey"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.kw = kw
@classmethod
def from_constraint(cls, constraint: Constraint) -> CreatePrimaryKeyOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
pk_constraint = cast("PrimaryKeyConstraint", constraint)
return cls(
sqla_compat.constraint_name_or_none(pk_constraint.name),
constraint_table.name,
pk_constraint.columns.keys(),
schema=constraint_table.schema,
**pk_constraint.dialect_kwargs,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> PrimaryKeyConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.primary_key_constraint(
self.constraint_name,
self.table_name,
self.columns,
schema=self.schema,
**self.kw,
)
@classmethod
def create_primary_key(
cls,
operations: Operations,
constraint_name: Optional[str],
table_name: str,
columns: List[str],
*,
schema: Optional[str] = None,
) -> None:
"""Issue a "create primary key" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.PrimaryKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the primary key constraint. The name
is necessary so that an ALTER statement can be emitted. For setups
that use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the target table.
:param columns: a list of string column names to be applied to the
primary key constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, columns, schema=schema)
return operations.invoke(op)
@classmethod
def batch_create_primary_key(
cls,
operations: BatchOperations,
constraint_name: Optional[str],
columns: List[str],
) -> None:
"""Issue a "create primary key" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_primary_key`
"""
op = cls(
constraint_name,
operations.impl.table_name,
columns,
schema=operations.impl.schema,
)
return operations.invoke(op)
@Operations.register_operation("create_unique_constraint")
@BatchOperations.register_operation(
"create_unique_constraint", "batch_create_unique_constraint"
)
@AddConstraintOp.register_add_constraint("unique_constraint")
class CreateUniqueConstraintOp(AddConstraintOp):
"""Represent a create unique constraint operation."""
constraint_type = "unique"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.kw = kw
@classmethod
def from_constraint(
cls, constraint: Constraint
) -> CreateUniqueConstraintOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
uq_constraint = cast("UniqueConstraint", constraint)
kw: Dict[str, Any] = {}
if uq_constraint.deferrable:
kw["deferrable"] = uq_constraint.deferrable
if uq_constraint.initially:
kw["initially"] = uq_constraint.initially
kw.update(uq_constraint.dialect_kwargs)
return cls(
sqla_compat.constraint_name_or_none(uq_constraint.name),
constraint_table.name,
[c.name for c in uq_constraint.columns],
schema=constraint_table.schema,
**kw,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> UniqueConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.unique_constraint(
self.constraint_name,
self.table_name,
self.columns,
schema=self.schema,
**self.kw,
)
@classmethod
def create_unique_constraint(
cls,
operations: Operations,
constraint_name: Optional[str],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> Any:
"""Issue a "create unique constraint" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_unique_constraint("uq_user_name", "user", ["name"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.UniqueConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the unique constraint. The name is
necessary so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param columns: a list of string column names in the
source table.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, columns, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_create_unique_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
columns: Sequence[str],
**kw: Any,
) -> Any:
"""Issue a "create unique constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_unique_constraint`
"""
kw["schema"] = operations.impl.schema
op = cls(constraint_name, operations.impl.table_name, columns, **kw)
return operations.invoke(op)
@Operations.register_operation("create_foreign_key")
@BatchOperations.register_operation(
"create_foreign_key", "batch_create_foreign_key"
)
@AddConstraintOp.register_add_constraint("foreign_key_constraint")
class CreateForeignKeyOp(AddConstraintOp):
"""Represent a create foreign key constraint operation."""
constraint_type = "foreignkey"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
source_table: str,
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.source_table = source_table
self.referent_table = referent_table
self.local_cols = local_cols
self.remote_cols = remote_cols
self.kw = kw
def to_diff_tuple(self) -> Tuple[str, ForeignKeyConstraint]:
return ("add_fk", self.to_constraint())
@classmethod
def from_constraint(cls, constraint: Constraint) -> CreateForeignKeyOp:
fk_constraint = cast("ForeignKeyConstraint", constraint)
kw: Dict[str, Any] = {}
if fk_constraint.onupdate:
kw["onupdate"] = fk_constraint.onupdate
if fk_constraint.ondelete:
kw["ondelete"] = fk_constraint.ondelete
if fk_constraint.initially:
kw["initially"] = fk_constraint.initially
if fk_constraint.deferrable:
kw["deferrable"] = fk_constraint.deferrable
if fk_constraint.use_alter:
kw["use_alter"] = fk_constraint.use_alter
if fk_constraint.match:
kw["match"] = fk_constraint.match
(
source_schema,
source_table,
source_columns,
target_schema,
target_table,
target_columns,
onupdate,
ondelete,
deferrable,
initially,
) = sqla_compat._fk_spec(fk_constraint)
kw["source_schema"] = source_schema
kw["referent_schema"] = target_schema
kw.update(fk_constraint.dialect_kwargs)
return cls(
sqla_compat.constraint_name_or_none(fk_constraint.name),
source_table,
target_table,
source_columns,
target_columns,
**kw,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> ForeignKeyConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.foreign_key_constraint(
self.constraint_name,
self.source_table,
self.referent_table,
self.local_cols,
self.remote_cols,
**self.kw,
)
@classmethod
def create_foreign_key(
cls,
operations: Operations,
constraint_name: Optional[str],
source_table: str,
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
*,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
source_schema: Optional[str] = None,
referent_schema: Optional[str] = None,
**dialect_kw: Any,
) -> None:
"""Issue a "create foreign key" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_foreign_key(
"fk_user_address",
"address",
"user",
["user_id"],
["id"],
)
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.ForeignKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the foreign key constraint. The name
is necessary so that an ALTER statement can be emitted. For setups
that use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``constraint_name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source_table: String name of the source table.
:param referent_table: String name of the destination table.
:param local_cols: a list of string column names in the
source table.
:param remote_cols: a list of string column names in the
remote table.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param source_schema: Optional schema name of the source table.
:param referent_schema: Optional schema name of the destination table.
"""
op = cls(
constraint_name,
source_table,
referent_table,
local_cols,
remote_cols,
onupdate=onupdate,
ondelete=ondelete,
deferrable=deferrable,
source_schema=source_schema,
referent_schema=referent_schema,
initially=initially,
match=match,
**dialect_kw,
)
return operations.invoke(op)
@classmethod
def batch_create_foreign_key(
cls,
operations: BatchOperations,
constraint_name: Optional[str],
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
*,
referent_schema: Optional[str] = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
**dialect_kw: Any,
) -> None:
"""Issue a "create foreign key" instruction using the
current batch migration context.
The batch form of this call omits the ``source`` and ``source_schema``
arguments from the call.
e.g.::
with batch_alter_table("address") as batch_op:
batch_op.create_foreign_key(
"fk_user_address",
"user",
["user_id"],
["id"],
)
.. seealso::
:meth:`.Operations.create_foreign_key`
"""
op = cls(
constraint_name,
operations.impl.table_name,
referent_table,
local_cols,
remote_cols,
onupdate=onupdate,
ondelete=ondelete,
deferrable=deferrable,
source_schema=operations.impl.schema,
referent_schema=referent_schema,
initially=initially,
match=match,
**dialect_kw,
)
return operations.invoke(op)
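# Hedged usage sketch (assumed table/column names): the batch form above
# infers the source table and schema from batch_alter_table(), so only the
# referent side is spelled out.
#
#     from alembic import op
#
#     def upgrade():
#         with op.batch_alter_table("address") as batch_op:
#             batch_op.create_foreign_key(
#                 "fk_address_user", "user", ["user_id"], ["id"],
#                 ondelete="CASCADE",
#             )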
@Operations.register_operation("create_check_constraint")
@BatchOperations.register_operation(
"create_check_constraint", "batch_create_check_constraint"
)
@AddConstraintOp.register_add_constraint("check_constraint")
@AddConstraintOp.register_add_constraint("table_or_column_check_constraint")
@AddConstraintOp.register_add_constraint("column_check_constraint")
class CreateCheckConstraintOp(AddConstraintOp):
"""Represent a create check constraint operation."""
constraint_type = "check"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
condition: Union[str, TextClause, ColumnElement[Any]],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.condition = condition
self.schema = schema
self.kw = kw
@classmethod
def from_constraint(
cls, constraint: Constraint
) -> CreateCheckConstraintOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
ck_constraint = cast("CheckConstraint", constraint)
return cls(
sqla_compat.constraint_name_or_none(ck_constraint.name),
constraint_table.name,
cast("ColumnElement[Any]", ck_constraint.sqltext),
schema=constraint_table.schema,
**ck_constraint.dialect_kwargs,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> CheckConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.check_constraint(
self.constraint_name,
self.table_name,
self.condition,
schema=self.schema,
**self.kw,
)
@classmethod
def create_check_constraint(
cls,
operations: Operations,
constraint_name: Optional[str],
table_name: str,
condition: Union[str, ColumnElement[bool], TextClause],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue a "create check constraint" instruction using the
current migration context.
e.g.::
from alembic import op
from sqlalchemy.sql import column, func
op.create_check_constraint(
"ck_user_name_len",
"user",
func.len(column("name")) > 5,
)
CHECK constraints are usually against a SQL expression, so ad-hoc
table metadata is usually needed. The function will convert the given
arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
to an anonymous table in order to emit the CREATE statement.
:param constraint_name: Name of the check constraint. The name is
necessary so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``constraint_name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param condition: SQL expression that's the condition of the
constraint. Can be a string or SQLAlchemy expression language
structure.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, condition, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_create_check_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
condition: Union[str, ColumnElement[bool], TextClause],
**kw: Any,
) -> None:
"""Issue a "create check constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``source`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_check_constraint`
"""
op = cls(
constraint_name,
operations.impl.table_name,
condition,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
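# Minimal sketch (assumed names): the condition may be a plain string or a
# SQLAlchemy expression, per the docstring above.
#
#     import sqlalchemy as sa
#     from alembic import op
#
#     def upgrade():
#         op.create_check_constraint(
#             "ck_account_balance_nonneg", "account", sa.text("balance >= 0")
#         )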
@Operations.register_operation("create_index")
@BatchOperations.register_operation("create_index", "batch_create_index")
class CreateIndexOp(MigrateOperation):
"""Represent a create index operation."""
def __init__(
self,
index_name: Optional[str],
table_name: str,
columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
*,
schema: Optional[str] = None,
unique: bool = False,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> None:
self.index_name = index_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.unique = unique
self.if_not_exists = if_not_exists
self.kw = kw
def reverse(self) -> DropIndexOp:
return DropIndexOp.from_index(self.to_index())
def to_diff_tuple(self) -> Tuple[str, Index]:
return ("add_index", self.to_index())
@classmethod
def from_index(cls, index: Index) -> CreateIndexOp:
assert index.table is not None
return cls(
index.name,
index.table.name,
index.expressions,
schema=index.table.schema,
unique=index.unique,
**index.kwargs,
)
def to_index(
self, migration_context: Optional[MigrationContext] = None
) -> Index:
schema_obj = schemaobj.SchemaObjects(migration_context)
idx = schema_obj.index(
self.index_name,
self.table_name,
self.columns,
schema=self.schema,
unique=self.unique,
**self.kw,
)
return idx
@classmethod
def create_index(
cls,
operations: Operations,
index_name: Optional[str],
table_name: str,
columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
*,
schema: Optional[str] = None,
unique: bool = False,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "create index" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_index("ik_test", "t1", ["foo", "bar"])
Functional indexes can be produced by using the
:func:`sqlalchemy.sql.expression.text` construct::
from alembic import op
from sqlalchemy import text
op.create_index("ik_test", "t1", [text("lower(foo)")])
:param index_name: name of the index.
:param table_name: name of the owning table.
:param columns: a list consisting of string column names and/or
:func:`~sqlalchemy.sql.expression.text` constructs.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param unique: If True, create a unique index.
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param if_not_exists: If True, adds IF NOT EXISTS operator when
creating the new index.
.. versionadded:: 1.12.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
op = cls(
index_name,
table_name,
columns,
schema=schema,
unique=unique,
if_not_exists=if_not_exists,
**kw,
)
return operations.invoke(op)
@classmethod
def batch_create_index(
cls,
operations: BatchOperations,
index_name: str,
columns: List[str],
**kw: Any,
) -> None:
"""Issue a "create index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.create_index`
"""
op = cls(
index_name,
operations.impl.table_name,
columns,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
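# Usage sketch (assumed names), combining the options documented above:
# a functional, unique index created idempotently.
#
#     import sqlalchemy as sa
#     from alembic import op
#
#     def upgrade():
#         op.create_index(
#             "ix_account_lower_name", "account",
#             [sa.text("lower(name)")],
#             unique=True, if_not_exists=True,
#         )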
@Operations.register_operation("drop_index")
@BatchOperations.register_operation("drop_index", "batch_drop_index")
class DropIndexOp(MigrateOperation):
"""Represent a drop index operation."""
def __init__(
self,
index_name: Union[quoted_name, str, conv],
table_name: Optional[str] = None,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
_reverse: Optional[CreateIndexOp] = None,
**kw: Any,
) -> None:
self.index_name = index_name
self.table_name = table_name
self.schema = schema
self.if_exists = if_exists
self._reverse = _reverse
self.kw = kw
def to_diff_tuple(self) -> Tuple[str, Index]:
return ("remove_index", self.to_index())
def reverse(self) -> CreateIndexOp:
return CreateIndexOp.from_index(self.to_index())
@classmethod
def from_index(cls, index: Index) -> DropIndexOp:
assert index.table is not None
return cls(
index.name, # type: ignore[arg-type]
table_name=index.table.name,
schema=index.table.schema,
_reverse=CreateIndexOp.from_index(index),
unique=index.unique,
**index.kwargs,
)
def to_index(
self, migration_context: Optional[MigrationContext] = None
) -> Index:
schema_obj = schemaobj.SchemaObjects(migration_context)
# need a dummy column name here since SQLAlchemy
# 0.7.6 and further raises on Index with no columns
return schema_obj.index(
self.index_name,
self.table_name,
self._reverse.columns if self._reverse else ["x"],
schema=self.schema,
**self.kw,
)
@classmethod
def drop_index(
cls,
operations: Operations,
index_name: str,
table_name: Optional[str] = None,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "drop index" instruction using the current
migration context.
e.g.::
drop_index("accounts")
:param index_name: name of the index.
:param table_name: name of the owning table. Some
backends such as Microsoft SQL Server require this.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_exists: If True, adds IF EXISTS operator when
dropping the index.
.. versionadded:: 1.12.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
op = cls(
index_name,
table_name=table_name,
schema=schema,
if_exists=if_exists,
**kw,
)
return operations.invoke(op)
@classmethod
def batch_drop_index(
cls, operations: BatchOperations, index_name: str, **kw: Any
) -> None:
"""Issue a "drop index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.drop_index`
"""
op = cls(
index_name,
table_name=operations.impl.table_name,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
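# Matching drop sketch (assumed names); table_name helps backends such as
# SQL Server, and if_exists mirrors create_index's if_not_exists.
#
#     from alembic import op
#
#     def downgrade():
#         op.drop_index(
#             "ix_account_lower_name", table_name="account", if_exists=True
#         )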
@Operations.register_operation("create_table")
class CreateTableOp(MigrateOperation):
"""Represent a create table operation."""
def __init__(
self,
table_name: str,
columns: Sequence[SchemaItem],
*,
schema: Optional[str] = None,
if_not_exists: Optional[bool] = None,
_namespace_metadata: Optional[MetaData] = None,
_constraints_included: bool = False,
**kw: Any,
) -> None:
self.table_name = table_name
self.columns = columns
self.schema = schema
self.if_not_exists = if_not_exists
self.info = kw.pop("info", {})
self.comment = kw.pop("comment", None)
self.prefixes = kw.pop("prefixes", None)
self.kw = kw
self._namespace_metadata = _namespace_metadata
self._constraints_included = _constraints_included
def reverse(self) -> DropTableOp:
return DropTableOp.from_table(
self.to_table(), _namespace_metadata=self._namespace_metadata
)
def to_diff_tuple(self) -> Tuple[str, Table]:
return ("add_table", self.to_table())
@classmethod
def from_table(
cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
) -> CreateTableOp:
if _namespace_metadata is None:
_namespace_metadata = table.metadata
return cls(
table.name,
list(table.c) + list(table.constraints),
schema=table.schema,
_namespace_metadata=_namespace_metadata,
# given a Table() object, this Table will contain full Index()
# and UniqueConstraint objects already constructed in response to
# each unique=True / index=True flag on a Column. Carry this
# state along so that when we re-convert back into a Table, we
# skip unique=True/index=True so that these constraints are
# not doubled up. see #844 #848
_constraints_included=True,
comment=table.comment,
info=dict(table.info),
prefixes=list(table._prefixes),
**table.kwargs,
)
def to_table(
self, migration_context: Optional[MigrationContext] = None
) -> Table:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(
self.table_name,
*self.columns,
schema=self.schema,
prefixes=list(self.prefixes) if self.prefixes else [],
comment=self.comment,
info=self.info.copy() if self.info else {},
_constraints_included=self._constraints_included,
**self.kw,
)
@classmethod
def create_table(
cls,
operations: Operations,
table_name: str,
*columns: SchemaItem,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> Table:
r"""Issue a "create table" instruction using the current migration
context.
This directive receives an argument list similar to that of the
traditional :class:`sqlalchemy.schema.Table` construct, but without the
metadata::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("name", VARCHAR(50), nullable=False),
Column("description", NVARCHAR(200)),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
Note that :meth:`.create_table` accepts
:class:`~sqlalchemy.schema.Column`
constructs directly from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the "timestamp" column
op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
The function also returns a newly created
:class:`~sqlalchemy.schema.Table` object, corresponding to the table
specification given, which is suitable for
immediate SQL operations, in particular
:meth:`.Operations.bulk_insert`::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
account_table = op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("name", VARCHAR(50), nullable=False),
Column("description", NVARCHAR(200)),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
op.bulk_insert(
account_table,
[
{"name": "A1", "description": "account 1"},
{"name": "A2", "description": "account 2"},
],
)
:param table_name: Name of the table
:param \*columns: collection of :class:`~sqlalchemy.schema.Column`
objects within
the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
objects
and :class:`~sqlalchemy.schema.Index` objects.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_not_exists: If True, adds IF NOT EXISTS operator when
creating the new table.
.. versionadded:: 1.13.3
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
:return: the :class:`~sqlalchemy.schema.Table` object corresponding
to the parameters given.
"""
op = cls(table_name, columns, if_not_exists=if_not_exists, **kw)
return operations.invoke(op)
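# Sketch of the op-object round trip that autogenerate relies on (the
# metadata and table here are assumptions for the example):
#
#     from sqlalchemy import Column, Integer, MetaData, Table
#
#     t = Table("account", MetaData(), Column("id", Integer, primary_key=True))
#     create = CreateTableOp.from_table(t)
#     create.to_diff_tuple()   # -> ("add_table", <Table "account">)
#     create.reverse()         # -> DropTableOp for the same table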
@Operations.register_operation("drop_table")
class DropTableOp(MigrateOperation):
"""Represent a drop table operation."""
def __init__(
self,
table_name: str,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
table_kw: Optional[MutableMapping[Any, Any]] = None,
_reverse: Optional[CreateTableOp] = None,
) -> None:
self.table_name = table_name
self.schema = schema
self.if_exists = if_exists
self.table_kw = table_kw or {}
self.comment = self.table_kw.pop("comment", None)
self.info = self.table_kw.pop("info", None)
self.prefixes = self.table_kw.pop("prefixes", None)
self._reverse = _reverse
def to_diff_tuple(self) -> Tuple[str, Table]:
return ("remove_table", self.to_table())
def reverse(self) -> CreateTableOp:
return CreateTableOp.from_table(self.to_table())
@classmethod
def from_table(
cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
) -> DropTableOp:
return cls(
table.name,
schema=table.schema,
table_kw={
"comment": table.comment,
"info": dict(table.info),
"prefixes": list(table._prefixes),
**table.kwargs,
},
_reverse=CreateTableOp.from_table(
table, _namespace_metadata=_namespace_metadata
),
)
def to_table(
self, migration_context: Optional[MigrationContext] = None
) -> Table:
if self._reverse:
cols_and_constraints = self._reverse.columns
else:
cols_and_constraints = []
schema_obj = schemaobj.SchemaObjects(migration_context)
t = schema_obj.table(
self.table_name,
*cols_and_constraints,
comment=self.comment,
info=self.info.copy() if self.info else {},
prefixes=list(self.prefixes) if self.prefixes else [],
schema=self.schema,
_constraints_included=(
self._reverse._constraints_included if self._reverse else False
),
**self.table_kw,
)
return t
@classmethod
def drop_table(
cls,
operations: Operations,
table_name: str,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "drop table" instruction using the current
migration context.
e.g.::
drop_table("accounts")
:param table_name: Name of the table
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_exists: If True, adds IF EXISTS operator when
dropping the table.
.. versionadded:: 1.13.3
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
"""
op = cls(table_name, schema=schema, if_exists=if_exists, table_kw=kw)
operations.invoke(op)
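# Usage sketch (assumed name):
#
#     from alembic import op
#
#     def downgrade():
#         op.drop_table("account", if_exists=True)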
class AlterTableOp(MigrateOperation):
"""Represent an alter table operation."""
def __init__(
self,
table_name: str,
*,
schema: Optional[str] = None,
) -> None:
self.table_name = table_name
self.schema = schema
@Operations.register_operation("rename_table")
class RenameTableOp(AlterTableOp):
"""Represent a rename table operation."""
def __init__(
self,
old_table_name: str,
new_table_name: str,
*,
schema: Optional[str] = None,
) -> None:
super().__init__(old_table_name, schema=schema)
self.new_table_name = new_table_name
@classmethod
def rename_table(
cls,
operations: Operations,
old_table_name: str,
new_table_name: str,
*,
schema: Optional[str] = None,
) -> None:
"""Emit an ALTER TABLE to rename a table.
:param old_table_name: old name.
:param new_table_name: new name.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(old_table_name, new_table_name, schema=schema)
return operations.invoke(op)
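# Usage sketch (assumed names):
#
#     from alembic import op
#
#     def upgrade():
#         op.rename_table("account", "account_archive")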
@Operations.register_operation("create_table_comment")
@BatchOperations.register_operation(
"create_table_comment", "batch_create_table_comment"
)
class CreateTableCommentOp(AlterTableOp):
"""Represent a COMMENT ON `table` operation."""
def __init__(
self,
table_name: str,
comment: Optional[str],
*,
schema: Optional[str] = None,
existing_comment: Optional[str] = None,
) -> None:
self.table_name = table_name
self.comment = comment
self.existing_comment = existing_comment
self.schema = schema
@classmethod
def create_table_comment(
cls,
operations: Operations,
table_name: str,
comment: Optional[str],
*,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
"""Emit a COMMENT ON operation to set the comment for a table.
:param table_name: string name of the target table.
:param comment: string value of the comment being registered against
the specified table.
:param existing_comment: String value of a comment
already registered on the specified table, used within autogenerate
so that the operation is reversible, but not required for direct
use.
.. seealso::
:meth:`.Operations.drop_table_comment`
:paramref:`.Operations.alter_column.comment`
"""
op = cls(
table_name,
comment,
existing_comment=existing_comment,
schema=schema,
)
return operations.invoke(op)
@classmethod
def batch_create_table_comment(
cls,
operations: BatchOperations,
comment: Optional[str],
*,
existing_comment: Optional[str] = None,
) -> None:
"""Emit a COMMENT ON operation to set the comment for a table
using the current batch migration context.
:param comment: string value of the comment being registered against
the specified table.
:param existing_comment: String value of a comment
already registered on the specified table, used within autogenerate
so that the operation is reversible, but not required for direct
use.
"""
op = cls(
operations.impl.table_name,
comment,
existing_comment=existing_comment,
schema=operations.impl.schema,
)
return operations.invoke(op)
def reverse(self) -> Union[CreateTableCommentOp, DropTableCommentOp]:
"""Reverses the COMMENT ON operation against a table."""
if self.existing_comment is None:
return DropTableCommentOp(
self.table_name,
existing_comment=self.comment,
schema=self.schema,
)
else:
return CreateTableCommentOp(
self.table_name,
self.existing_comment,
existing_comment=self.comment,
schema=self.schema,
)
def to_table(
self, migration_context: Optional[MigrationContext] = None
) -> Table:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(
self.table_name, schema=self.schema, comment=self.comment
)
def to_diff_tuple(self) -> Tuple[Any, ...]:
return ("add_table_comment", self.to_table(), self.existing_comment)
@Operations.register_operation("drop_table_comment")
@BatchOperations.register_operation(
"drop_table_comment", "batch_drop_table_comment"
)
class DropTableCommentOp(AlterTableOp):
"""Represent an operation to remove the comment from a table."""
def __init__(
self,
table_name: str,
*,
schema: Optional[str] = None,
existing_comment: Optional[str] = None,
) -> None:
self.table_name = table_name
self.existing_comment = existing_comment
self.schema = schema
@classmethod
def drop_table_comment(
cls,
operations: Operations,
table_name: str,
*,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
"""Issue a "drop table comment" operation to
remove an existing comment set on a table.
:param table_name: string name of the target table.
:param existing_comment: An optional string value of a comment already
registered on the specified table.
.. seealso::
:meth:`.Operations.create_table_comment`
:paramref:`.Operations.alter_column.comment`
"""
op = cls(table_name, existing_comment=existing_comment, schema=schema)
return operations.invoke(op)
@classmethod
def batch_drop_table_comment(
cls,
operations: BatchOperations,
*,
existing_comment: Optional[str] = None,
) -> None:
"""Issue a "drop table comment" operation to
remove an existing comment set on a table using the current
batch operations context.
:param existing_comment: An optional string value of a comment already
registered on the specified table.
"""
op = cls(
operations.impl.table_name,
existing_comment=existing_comment,
schema=operations.impl.schema,
)
return operations.invoke(op)
def reverse(self) -> CreateTableCommentOp:
"""Reverses the COMMENT ON operation against a table."""
return CreateTableCommentOp(
self.table_name, self.existing_comment, schema=self.schema
)
def to_table(
self, migration_context: Optional[MigrationContext] = None
) -> Table:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(self.table_name, schema=self.schema)
def to_diff_tuple(self) -> Tuple[Any, ...]:
return ("remove_table_comment", self.to_table())
@Operations.register_operation("alter_column")
@BatchOperations.register_operation("alter_column", "batch_alter_column")
class AlterColumnOp(AlterTableOp):
"""Represent an alter column operation."""
def __init__(
self,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
existing_type: Optional[Any] = None,
existing_server_default: Any = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
modify_nullable: Optional[bool] = None,
modify_comment: Optional[Union[str, Literal[False]]] = False,
modify_server_default: Any = False,
modify_name: Optional[str] = None,
modify_type: Optional[Any] = None,
**kw: Any,
) -> None:
super().__init__(table_name, schema=schema)
self.column_name = column_name
self.existing_type = existing_type
self.existing_server_default = existing_server_default
self.existing_nullable = existing_nullable
self.existing_comment = existing_comment
self.modify_nullable = modify_nullable
self.modify_comment = modify_comment
self.modify_server_default = modify_server_default
self.modify_name = modify_name
self.modify_type = modify_type
self.kw = kw
def to_diff_tuple(self) -> Any:
col_diff = []
schema, tname, cname = self.schema, self.table_name, self.column_name
if self.modify_type is not None:
col_diff.append(
(
"modify_type",
schema,
tname,
cname,
{
"existing_nullable": self.existing_nullable,
"existing_server_default": (
self.existing_server_default
),
"existing_comment": self.existing_comment,
},
self.existing_type,
self.modify_type,
)
)
if self.modify_nullable is not None:
col_diff.append(
(
"modify_nullable",
schema,
tname,
cname,
{
"existing_type": self.existing_type,
"existing_server_default": (
self.existing_server_default
),
"existing_comment": self.existing_comment,
},
self.existing_nullable,
self.modify_nullable,
)
)
if self.modify_server_default is not False:
col_diff.append(
(
"modify_default",
schema,
tname,
cname,
{
"existing_nullable": self.existing_nullable,
"existing_type": self.existing_type,
"existing_comment": self.existing_comment,
},
self.existing_server_default,
self.modify_server_default,
)
)
if self.modify_comment is not False:
col_diff.append(
(
"modify_comment",
schema,
tname,
cname,
{
"existing_nullable": self.existing_nullable,
"existing_type": self.existing_type,
"existing_server_default": (
self.existing_server_default
),
},
self.existing_comment,
self.modify_comment,
)
)
return col_diff
def has_changes(self) -> bool:
hc1 = (
self.modify_nullable is not None
or self.modify_server_default is not False
or self.modify_type is not None
or self.modify_comment is not False
)
if hc1:
return True
for kw in self.kw:
if kw.startswith("modify_"):
return True
else:
return False
def reverse(self) -> AlterColumnOp:
kw = self.kw.copy()
kw["existing_type"] = self.existing_type
kw["existing_nullable"] = self.existing_nullable
kw["existing_server_default"] = self.existing_server_default
kw["existing_comment"] = self.existing_comment
if self.modify_type is not None:
kw["modify_type"] = self.modify_type
if self.modify_nullable is not None:
kw["modify_nullable"] = self.modify_nullable
if self.modify_server_default is not False:
kw["modify_server_default"] = self.modify_server_default
if self.modify_comment is not False:
kw["modify_comment"] = self.modify_comment
# TODO: make this a little simpler
all_keys = {
m.group(1)
for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw]
if m
}
for k in all_keys:
if "modify_%s" % k in kw:
swap = kw["existing_%s" % k]
kw["existing_%s" % k] = kw["modify_%s" % k]
kw["modify_%s" % k] = swap
return self.__class__(
self.table_name, self.column_name, schema=self.schema, **kw
)
@classmethod
def alter_column(
cls,
operations: Operations,
table_name: str,
column_name: str,
*,
nullable: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
server_default: Union[
str, bool, Identity, Computed, TextClause
] = False,
new_column_name: Optional[str] = None,
type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None,
existing_type: Optional[
Union[TypeEngine[Any], Type[TypeEngine[Any]]]
] = None,
existing_server_default: Union[
str, bool, Identity, Computed, TextClause, None
] = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
**kw: Any,
) -> None:
r"""Issue an "alter column" instruction using the
current migration context.
Generally, only that aspect of the column which
is being changed, i.e. name, type, nullability,
default, needs to be specified. Multiple changes
can also be specified at once and the backend should
"do the right thing", emitting each change either
separately or together as the backend allows.
MySQL has special requirements here, since MySQL
cannot ALTER a column without a full specification.
When producing MySQL-compatible migration files,
it is recommended that the ``existing_type``,
``existing_server_default``, and ``existing_nullable``
parameters be present, if not being altered.
Type changes which are against the SQLAlchemy
"schema" types :class:`~sqlalchemy.types.Boolean`
and :class:`~sqlalchemy.types.Enum` may also
add or drop constraints which accompany those
types on backends that don't support them natively.
The ``existing_type`` argument is
used in this case to identify and remove a previous
constraint that was bound to the type object.
:param table_name: string name of the target table.
:param column_name: string name of the target column,
as it exists before the operation begins.
:param nullable: Optional; specify ``True`` or ``False``
to alter the column's nullability.
:param server_default: Optional; specify a string
SQL expression, :func:`~sqlalchemy.sql.expression.text`,
or :class:`~sqlalchemy.schema.DefaultClause` to indicate
an alteration to the column's default value.
Set to ``None`` to have the default removed.
:param comment: optional string text of a new comment to add to the
column.
:param new_column_name: Optional; specify a string name here to
indicate the new name within a column rename operation.
:param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
type object to specify a change to the column's type.
For SQLAlchemy types that also indicate a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
the constraint is also generated.
:param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
currently understood by the MySQL dialect.
:param existing_type: Optional; a
:class:`~sqlalchemy.types.TypeEngine`
type object to specify the previous type. This
is required for all MySQL column alter operations that
don't otherwise specify a new type, as well as for
when nullability is being changed on a SQL Server
column. It is also used if the type is a so-called
SQLAlchemy "schema" type which may define a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`,
:class:`~sqlalchemy.types.Enum`),
so that the constraint can be dropped.
:param existing_server_default: Optional; The existing
default value of the column. Required on MySQL if
an existing default is not being changed; else MySQL
removes the default.
:param existing_nullable: Optional; the existing nullability
of the column. Required on MySQL if the existing nullability
is not being changed; else MySQL sets this to NULL.
:param existing_autoincrement: Optional; the existing autoincrement
of the column. Used for MySQL's system of altering a column
that specifies ``AUTO_INCREMENT``.
:param existing_comment: string text of the existing comment on the
column to be maintained. Required on MySQL if the existing comment
on the column is not being changed.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param postgresql_using: String argument which will indicate a
SQL expression to render within the Postgresql-specific USING clause
within ALTER COLUMN. This string is taken directly as raw SQL which
must explicitly include any necessary quoting or escaping of tokens
within the expression.
"""
alt = cls(
table_name,
column_name,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
modify_name=new_column_name,
modify_type=type_,
modify_server_default=server_default,
modify_nullable=nullable,
modify_comment=comment,
**kw,
)
return operations.invoke(alt)
@classmethod
def batch_alter_column(
cls,
operations: BatchOperations,
column_name: str,
*,
nullable: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
server_default: Any = False,
new_column_name: Optional[str] = None,
type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None,
existing_type: Optional[
Union[TypeEngine[Any], Type[TypeEngine[Any]]]
] = None,
existing_server_default: Optional[
Union[str, bool, Identity, Computed]
] = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue an "alter column" instruction using the current
batch migration context.
Parameters are the same as that of :meth:`.Operations.alter_column`,
as well as the following option(s):
:param insert_before: String name of an existing column which this
column should be placed before, when creating the new table.
:param insert_after: String name of an existing column which this
column should be placed after, when creating the new table. If
both :paramref:`.BatchOperations.alter_column.insert_before`
and :paramref:`.BatchOperations.alter_column.insert_after` are
omitted, the column is inserted after the last existing column
in the table.
.. seealso::
:meth:`.Operations.alter_column`
"""
alt = cls(
operations.impl.table_name,
column_name,
schema=operations.impl.schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
modify_name=new_column_name,
modify_type=type_,
modify_server_default=server_default,
modify_nullable=nullable,
modify_comment=comment,
insert_before=insert_before,
insert_after=insert_after,
**kw,
)
return operations.invoke(alt)
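# Usage sketch (assumed names/types), including the existing_* values that
# the docstring above recommends for MySQL compatibility:
#
#     import sqlalchemy as sa
#     from alembic import op
#
#     def upgrade():
#         op.alter_column(
#             "account", "name",
#             existing_type=sa.String(50),
#             type_=sa.String(100),
#             existing_nullable=False,
#         )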
@Operations.register_operation("add_column")
@BatchOperations.register_operation("add_column", "batch_add_column")
class AddColumnOp(AlterTableOp):
"""Represent an add column operation."""
def __init__(
self,
table_name: str,
column: Column[Any],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
super().__init__(table_name, schema=schema)
self.column = column
self.kw = kw
def reverse(self) -> DropColumnOp:
return DropColumnOp.from_column_and_tablename(
self.schema, self.table_name, self.column
)
def to_diff_tuple(
self,
) -> Tuple[str, Optional[str], str, Column[Any]]:
return ("add_column", self.schema, self.table_name, self.column)
def to_column(self) -> Column[Any]:
return self.column
@classmethod
def from_column(cls, col: Column[Any]) -> AddColumnOp:
return cls(col.table.name, col, schema=col.table.schema)
@classmethod
def from_column_and_tablename(
cls,
schema: Optional[str],
tname: str,
col: Column[Any],
) -> AddColumnOp:
return cls(tname, col, schema=schema)
@classmethod
def add_column(
cls,
operations: Operations,
table_name: str,
column: Column[Any],
*,
schema: Optional[str] = None,
) -> None:
"""Issue an "add column" instruction using the current
migration context.
e.g.::
from alembic import op
from sqlalchemy import Column, String
op.add_column("organization", Column("name", String()))
The :meth:`.Operations.add_column` method typically corresponds
to the SQL command "ALTER TABLE... ADD COLUMN". Within the scope
of this command, the column's name, datatype, nullability,
and optional server-generated defaults may be indicated.
.. note::
With the exception of NOT NULL constraints or single-column FOREIGN
KEY constraints, other kinds of constraints such as PRIMARY KEY,
UNIQUE or CHECK constraints **cannot** be generated using this
method; for these constraints, refer to operations such as
:meth:`.Operations.create_primary_key` and
:meth:`.Operations.create_check_constraint`. In particular, the
following :class:`~sqlalchemy.schema.Column` parameters are
**ignored**:
* :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
typically do not support an ALTER operation that can add
individual columns one at a time to an existing primary key
constraint, therefore it's less ambiguous to use the
:meth:`.Operations.create_primary_key` method, which assumes no
existing primary key constraint is present.
* :paramref:`~sqlalchemy.schema.Column.unique` - use the
:meth:`.Operations.create_unique_constraint` method
* :paramref:`~sqlalchemy.schema.Column.index` - use the
:meth:`.Operations.create_index` method
The provided :class:`~sqlalchemy.schema.Column` object may include a
:class:`~sqlalchemy.schema.ForeignKey` constraint directive,
referencing a remote table name. For this specific type of constraint,
Alembic will automatically emit a second ALTER statement in order to
add the single-column FOREIGN KEY constraint separately::
from alembic import op
from sqlalchemy import Column, INTEGER, ForeignKey
op.add_column(
"organization",
Column("account_id", INTEGER, ForeignKey("accounts.id")),
)
The column argument passed to :meth:`.Operations.add_column` is a
:class:`~sqlalchemy.schema.Column` construct, used in the same way it's
used in SQLAlchemy. In particular, values or functions to be indicated
as producing the column's default value on the database side are
specified using the ``server_default`` parameter, and not ``default``
which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the column add
op.add_column(
"account",
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
:param table_name: String name of the parent table.
:param column: a :class:`sqlalchemy.schema.Column` object
representing the new column.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(table_name, column, schema=schema)
return operations.invoke(op)
@classmethod
def batch_add_column(
cls,
operations: BatchOperations,
column: Column[Any],
*,
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
) -> None:
"""Issue an "add column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.add_column`
"""
kw = {}
if insert_before:
kw["insert_before"] = insert_before
if insert_after:
kw["insert_after"] = insert_after
op = cls(
operations.impl.table_name,
column,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
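# Usage sketch (assumed names): note server_default, not default, per the
# docstring above.
#
#     import sqlalchemy as sa
#     from alembic import op
#
#     def upgrade():
#         op.add_column(
#             "account",
#             sa.Column("created_at", sa.TIMESTAMP, server_default=sa.func.now()),
#         )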
@Operations.register_operation("drop_column")
@BatchOperations.register_operation("drop_column", "batch_drop_column")
class DropColumnOp(AlterTableOp):
"""Represent a drop column operation."""
def __init__(
self,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
_reverse: Optional[AddColumnOp] = None,
**kw: Any,
) -> None:
super().__init__(table_name, schema=schema)
self.column_name = column_name
self.kw = kw
self._reverse = _reverse
def to_diff_tuple(
self,
) -> Tuple[str, Optional[str], str, Column[Any]]:
return (
"remove_column",
self.schema,
self.table_name,
self.to_column(),
)
def reverse(self) -> AddColumnOp:
if self._reverse is None:
raise ValueError(
"operation is not reversible; "
"original column is not present"
)
return AddColumnOp.from_column_and_tablename(
self.schema, self.table_name, self._reverse.column
)
@classmethod
def from_column_and_tablename(
cls,
schema: Optional[str],
tname: str,
col: Column[Any],
) -> DropColumnOp:
return cls(
tname,
col.name,
schema=schema,
_reverse=AddColumnOp.from_column_and_tablename(schema, tname, col),
)
def to_column(
self, migration_context: Optional[MigrationContext] = None
) -> Column[Any]:
if self._reverse is not None:
return self._reverse.column
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.column(self.column_name, NULLTYPE)
@classmethod
def drop_column(
cls,
operations: Operations,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue a "drop column" instruction using the current
migration context.
e.g.::
drop_column("organization", "account_id")
:param table_name: name of table
:param column_name: name of column
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param mssql_drop_check: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the CHECK constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.check_constraints,
then exec's a separate DROP CONSTRAINT for that constraint.
:param mssql_drop_default: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the DEFAULT constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.default_constraints,
then exec's a separate DROP CONSTRAINT for that default.
:param mssql_drop_foreign_key: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop a single FOREIGN KEY constraint on the column using a
SQL-script-compatible
block that selects into a @variable from
sys.foreign_keys/sys.foreign_key_columns,
then exec's a separate DROP CONSTRAINT for that foreign key. Only
works if the column has exactly one FK constraint which refers to
it, at the moment.
"""
op = cls(table_name, column_name, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_drop_column(
cls, operations: BatchOperations, column_name: str, **kw: Any
) -> None:
"""Issue a "drop column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.drop_column`
"""
op = cls(
operations.impl.table_name,
column_name,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
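# Usage sketch (assumed names), including one of the SQL Server-specific
# flags documented above:
#
#     from alembic import op
#
#     def downgrade():
#         op.drop_column("account", "created_at", mssql_drop_default=True)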
@Operations.register_operation("bulk_insert")
class BulkInsertOp(MigrateOperation):
"""Represent a bulk insert operation."""
def __init__(
self,
table: Union[Table, TableClause],
rows: List[Dict[str, Any]],
*,
multiinsert: bool = True,
) -> None:
self.table = table
self.rows = rows
self.multiinsert = multiinsert
@classmethod
def bulk_insert(
cls,
operations: Operations,
table: Union[Table, TableClause],
rows: List[Dict[str, Any]],
*,
multiinsert: bool = True,
) -> None:
"""Issue a "bulk insert" operation using the current
migration context.
This provides a means of representing an INSERT of multiple rows
which works equally well in the context of executing on a live
connection as well as that of generating a SQL script. In the
case of a SQL script, the values are rendered inline into the
statement.
e.g.::
from alembic import op
from datetime import date
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Date
# Create an ad-hoc table to use for the insert statement.
accounts_table = table(
"account",
column("id", Integer),
column("name", String),
column("create_date", Date),
)
op.bulk_insert(
accounts_table,
[
{
"id": 1,
"name": "John Smith",
"create_date": date(2010, 10, 5),
},
{
"id": 2,
"name": "Ed Williams",
"create_date": date(2007, 5, 27),
},
{
"id": 3,
"name": "Wendy Jones",
"create_date": date(2008, 8, 15),
},
],
)
When using --sql mode, some datatypes may not render inline
automatically, such as dates and other special types. When this
issue is present, :meth:`.Operations.inline_literal` may be used::
op.bulk_insert(
accounts_table,
[
{
"id": 1,
"name": "John Smith",
"create_date": op.inline_literal("2010-10-05"),
},
{
"id": 2,
"name": "Ed Williams",
"create_date": op.inline_literal("2007-05-27"),
},
{
"id": 3,
"name": "Wendy Jones",
"create_date": op.inline_literal("2008-08-15"),
},
],
multiinsert=False,
)
When using :meth:`.Operations.inline_literal` in conjunction with
:meth:`.Operations.bulk_insert`, in order for the statement to work
in "online" (e.g. non --sql) mode, the
:paramref:`~.Operations.bulk_insert.multiinsert`
flag should be set to ``False``, which will have the effect of
individual INSERT statements being emitted to the database, each
with a distinct VALUES clause, so that the "inline" values can
still be rendered, rather than attempting to pass the values
as bound parameters.
:param table: a table object which represents the target of the INSERT.
:param rows: a list of dictionaries indicating rows.
:param multiinsert: when at its default of True and --sql mode is not
enabled, the INSERT statement will be executed using
"executemany()" style, where all elements in the list of
dictionaries are passed as bound parameters in a single
list. Setting this to False results in individual INSERT
statements being emitted per parameter set, and is needed
in those cases where non-literal values are present in the
parameter sets.
"""
op = cls(table, rows, multiinsert=multiinsert)
operations.invoke(op)
@Operations.register_operation("execute")
@BatchOperations.register_operation("execute", "batch_execute")
class ExecuteSQLOp(MigrateOperation):
"""Represent an execute SQL operation."""
def __init__(
self,
sqltext: Union[Executable, str],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
self.sqltext = sqltext
self.execution_options = execution_options
@classmethod
def execute(
cls,
operations: Operations,
sqltext: Union[Executable, str],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
r"""Execute the given SQL using the current migration context.
The given SQL can be a plain string, e.g.::
op.execute("INSERT INTO table (foo) VALUES ('some value')")
Or it can be any kind of Core SQL Expression construct, such as
below where we use an update construct::
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
account = table("account", column("name", String))
op.execute(
account.update()
.where(account.c.name == op.inline_literal("account 1"))
.values({"name": op.inline_literal("account 2")})
)
Above, we made use of the SQLAlchemy
:func:`sqlalchemy.sql.expression.table` and
:func:`sqlalchemy.sql.expression.column` constructs to make a brief,
ad-hoc table construct just for our UPDATE statement. A full
:class:`~sqlalchemy.schema.Table` construct of course works perfectly
fine as well, though note it's a recommended practice to at least
ensure the definition of a table is self-contained within the migration
script, rather than imported from a module that may break compatibility
with older migrations.
In a SQL script context, the statement is emitted directly to the
output stream. There is *no* return result, however, as this
function is oriented towards generating a change script
that can run in "offline" mode. Additionally, parameterized
statements are discouraged here, as they *will not work* in offline
mode. Above, we use :meth:`.inline_literal` where parameters are
to be used.
For full interaction with a connected database where parameters can
also be used normally, use the "bind" available from the context::
from alembic import op
connection = op.get_bind()
connection.execute(
account.update()
.where(account.c.name == "account 1")
.values({"name": "account 2"})
)
Additionally, when passing the statement as a plain string, it is first
coerced into a :func:`sqlalchemy.sql.expression.text` construct
before being passed along. In the less likely case that the
literal SQL string contains a colon, it must be escaped with a
backslash, as::
op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
:param sqltext: Any legal SQLAlchemy expression, including:
* a string
* a :func:`sqlalchemy.sql.expression.text` construct.
* a :func:`sqlalchemy.sql.expression.insert` construct.
* a :func:`sqlalchemy.sql.expression.update` construct.
* a :func:`sqlalchemy.sql.expression.delete` construct.
* Any "executable" described in SQLAlchemy Core documentation,
noting that no result set is returned.
.. note:: when passing a plain string, the statement is coerced into
a :func:`sqlalchemy.sql.expression.text` construct. This construct
considers symbols with colons, e.g. ``:foo`` to be bound parameters.
To avoid this, ensure that colon symbols are escaped, e.g.
``\:foo``.
:param execution_options: Optional dictionary of
execution options, will be passed to
:meth:`sqlalchemy.engine.Connection.execution_options`.
"""
op = cls(sqltext, execution_options=execution_options)
return operations.invoke(op)
@classmethod
def batch_execute(
cls,
operations: Operations,
sqltext: Union[Executable, str],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
"""Execute the given SQL using the current migration context.
.. seealso::
:meth:`.Operations.execute`
"""
return cls.execute(
operations, sqltext, execution_options=execution_options
)
def to_diff_tuple(self) -> Tuple[str, Union[Executable, str]]:
return ("execute", self.sqltext)
class OpContainer(MigrateOperation):
"""Represent a sequence of operations operation."""
def __init__(self, ops: Sequence[MigrateOperation] = ()) -> None:
self.ops = list(ops)
def is_empty(self) -> bool:
return not self.ops
def as_diffs(self) -> Any:
return list(OpContainer._ops_as_diffs(self))
@classmethod
def _ops_as_diffs(
cls, migrations: OpContainer
) -> Iterator[Tuple[Any, ...]]:
for op in migrations.ops:
if hasattr(op, "ops"):
yield from cls._ops_as_diffs(cast("OpContainer", op))
else:
yield op.to_diff_tuple()
class ModifyTableOps(OpContainer):
"""Contains a sequence of operations that all apply to a single Table."""
def __init__(
self,
table_name: str,
ops: Sequence[MigrateOperation],
*,
schema: Optional[str] = None,
) -> None:
super().__init__(ops)
self.table_name = table_name
self.schema = schema
def reverse(self) -> ModifyTableOps:
return ModifyTableOps(
self.table_name,
ops=list(reversed([op.reverse() for op in self.ops])),
schema=self.schema,
)
class UpgradeOps(OpContainer):
"""contains a sequence of operations that would apply to the
'upgrade' stream of a script.
.. seealso::
:ref:`customizing_revision`
"""
def __init__(
self,
ops: Sequence[MigrateOperation] = (),
upgrade_token: str = "upgrades",
) -> None:
super().__init__(ops=ops)
self.upgrade_token = upgrade_token
def reverse_into(self, downgrade_ops: DowngradeOps) -> DowngradeOps:
downgrade_ops.ops[:] = list(
reversed([op.reverse() for op in self.ops])
)
return downgrade_ops
def reverse(self) -> DowngradeOps:
return self.reverse_into(DowngradeOps(ops=[]))
class DowngradeOps(OpContainer):
"""contains a sequence of operations that would apply to the
'downgrade' stream of a script.
.. seealso::
:ref:`customizing_revision`
"""
def __init__(
self,
ops: Sequence[MigrateOperation] = (),
downgrade_token: str = "downgrades",
) -> None:
super().__init__(ops=ops)
self.downgrade_token = downgrade_token
def reverse(self) -> UpgradeOps:
return UpgradeOps(
ops=list(reversed([op.reverse() for op in self.ops]))
)
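# Sketch of how autogenerate derives the downgrade stream from the upgrade
# stream via the reverse() methods above (the op instances are assumptions):
#
#     upgrade_stream = UpgradeOps(ops=[add_column_op, create_index_op])
#     downgrade_stream = upgrade_stream.reverse()
#     # downgrade_stream.ops is each op inverted, in reversed order:
#     # [create_index_op.reverse(), add_column_op.reverse()]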
class MigrationScript(MigrateOperation):
"""represents a migration script.
E.g. when autogenerate encounters this object, this corresponds to the
production of an actual script file.
A normal :class:`.MigrationScript` object would contain a single
:class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.
These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops``
attributes.
In the case of an autogenerate operation that runs multiple times,
such as the multiple database example in the "multidb" template,
the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled,
and instead these objects should be accessed via the ``.upgrade_ops_list``
and ``.downgrade_ops_list`` list-based attributes. These latter
attributes are always available at the very least as single-element lists.
.. seealso::
:ref:`customizing_revision`
"""
_needs_render: Optional[bool]
_upgrade_ops: List[UpgradeOps]
_downgrade_ops: List[DowngradeOps]
def __init__(
self,
rev_id: Optional[str],
upgrade_ops: UpgradeOps,
downgrade_ops: DowngradeOps,
*,
message: Optional[str] = None,
imports: Set[str] = set(),
head: Optional[str] = None,
splice: Optional[bool] = None,
branch_label: Optional[_RevIdType] = None,
version_path: Optional[str] = None,
depends_on: Optional[_RevIdType] = None,
) -> None:
self.rev_id = rev_id
self.message = message
self.imports = imports
self.head = head
self.splice = splice
self.branch_label = branch_label
self.version_path = version_path
self.depends_on = depends_on
self.upgrade_ops = upgrade_ops
self.downgrade_ops = downgrade_ops
@property
def upgrade_ops(self) -> Optional[UpgradeOps]:
"""An instance of :class:`.UpgradeOps`.
.. seealso::
:attr:`.MigrationScript.upgrade_ops_list`
"""
if len(self._upgrade_ops) > 1:
raise ValueError(
"This MigrationScript instance has a multiple-entry "
"list for UpgradeOps; please use the "
"upgrade_ops_list attribute."
)
elif not self._upgrade_ops:
return None
else:
return self._upgrade_ops[0]
@upgrade_ops.setter
def upgrade_ops(
self, upgrade_ops: Union[UpgradeOps, List[UpgradeOps]]
) -> None:
self._upgrade_ops = util.to_list(upgrade_ops)
for elem in self._upgrade_ops:
assert isinstance(elem, UpgradeOps)
@property
def downgrade_ops(self) -> Optional[DowngradeOps]:
"""An instance of :class:`.DowngradeOps`.
.. seealso::
:attr:`.MigrationScript.downgrade_ops_list`
"""
if len(self._downgrade_ops) > 1:
raise ValueError(
"This MigrationScript instance has a multiple-entry "
"list for DowngradeOps; please use the "
"downgrade_ops_list attribute."
)
elif not self._downgrade_ops:
return None
else:
return self._downgrade_ops[0]
@downgrade_ops.setter
def downgrade_ops(
self, downgrade_ops: Union[DowngradeOps, List[DowngradeOps]]
) -> None:
self._downgrade_ops = util.to_list(downgrade_ops)
for elem in self._downgrade_ops:
assert isinstance(elem, DowngradeOps)
@property
def upgrade_ops_list(self) -> List[UpgradeOps]:
"""A list of :class:`.UpgradeOps` instances.
This is used in place of the :attr:`.MigrationScript.upgrade_ops`
attribute when dealing with a revision operation that does
multiple autogenerate passes.
"""
return self._upgrade_ops
@property
def downgrade_ops_list(self) -> List[DowngradeOps]:
"""A list of :class:`.DowngradeOps` instances.
This is used in place of the :attr:`.MigrationScript.downgrade_ops`
attribute when dealing with a revision operation that does
multiple autogenerate passes.
"""
return self._downgrade_ops
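if __name__ == "__main__":
    # Hedged demo, not part of the module above: reversing an (empty)
    # UpgradeOps yields a DowngradeOps, mirroring how autogenerate derives
    # the downgrade stream from the upgrade stream.
    up = UpgradeOps(ops=[])
    down = up.reverse()
    assert isinstance(down, DowngradeOps)
    assert down.is_empty() and down.downgrade_token == "downgrades"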
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import schema as sa_schema
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import Index
from sqlalchemy.types import Integer
from sqlalchemy.types import NULLTYPE
from .. import util
from ..util import sqla_compat
if TYPE_CHECKING:
from sqlalchemy.sql.elements import ColumnElement
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.sql.schema import ForeignKey
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import PrimaryKeyConstraint
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.schema import UniqueConstraint
from sqlalchemy.sql.type_api import TypeEngine
from ..runtime.migration import MigrationContext
class SchemaObjects:
def __init__(
self, migration_context: Optional[MigrationContext] = None
) -> None:
self.migration_context = migration_context
def primary_key_constraint(
self,
name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
cols: Sequence[str],
schema: Optional[str] = None,
**dialect_kw,
) -> PrimaryKeyConstraint:
m = self.metadata()
columns = [sa_schema.Column(n, NULLTYPE) for n in cols]
t = sa_schema.Table(table_name, m, *columns, schema=schema)
# SQLAlchemy primary key constraint name arg is wrongly typed on
# the SQLAlchemy side through 2.0.5 at least
p = sa_schema.PrimaryKeyConstraint(
*[t.c[n] for n in cols], name=name, **dialect_kw # type: ignore
)
return p
def foreign_key_constraint(
self,
name: Optional[sqla_compat._ConstraintNameDefined],
source: str,
referent: str,
local_cols: List[str],
remote_cols: List[str],
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
source_schema: Optional[str] = None,
referent_schema: Optional[str] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
**dialect_kw,
) -> ForeignKeyConstraint:
m = self.metadata()
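        # Self-referential FK (same table and schema): local and remote
        # columns all live on the source table, so stub them all out there
        # rather than building a separate referent table.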
if source == referent and source_schema == referent_schema:
t1_cols = local_cols + remote_cols
else:
t1_cols = local_cols
sa_schema.Table(
referent,
m,
*[sa_schema.Column(n, NULLTYPE) for n in remote_cols],
schema=referent_schema,
)
t1 = sa_schema.Table(
source,
m,
*[
sa_schema.Column(n, NULLTYPE)
for n in util.unique_list(t1_cols)
],
schema=source_schema,
)
tname = (
"%s.%s" % (referent_schema, referent)
if referent_schema
else referent
)
dialect_kw["match"] = match
f = sa_schema.ForeignKeyConstraint(
local_cols,
["%s.%s" % (tname, n) for n in remote_cols],
name=name,
onupdate=onupdate,
ondelete=ondelete,
deferrable=deferrable,
initially=initially,
**dialect_kw,
)
t1.append_constraint(f)
return f
def unique_constraint(
self,
name: Optional[sqla_compat._ConstraintNameDefined],
source: str,
local_cols: Sequence[str],
schema: Optional[str] = None,
**kw,
) -> UniqueConstraint:
t = sa_schema.Table(
source,
self.metadata(),
*[sa_schema.Column(n, NULLTYPE) for n in local_cols],
schema=schema,
)
kw["name"] = name
uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw)
# TODO: need event tests to ensure the event
# is fired off here
t.append_constraint(uq)
return uq
def check_constraint(
self,
name: Optional[sqla_compat._ConstraintNameDefined],
source: str,
condition: Union[str, TextClause, ColumnElement[Any]],
schema: Optional[str] = None,
**kw,
    ) -> CheckConstraint:
t = sa_schema.Table(
source,
self.metadata(),
sa_schema.Column("x", Integer),
schema=schema,
)
ck = sa_schema.CheckConstraint(condition, name=name, **kw)
t.append_constraint(ck)
return ck
def generic_constraint(
self,
name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
type_: Optional[str],
schema: Optional[str] = None,
**kw,
) -> Any:
t = self.table(table_name, schema=schema)
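        # Map the string constraint type (as passed to e.g.
        # op.drop_constraint(type_=...)) to a placeholder construct;
        # None falls through to the generic Constraint base.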
types: Dict[Optional[str], Any] = {
"foreignkey": lambda name: sa_schema.ForeignKeyConstraint(
[], [], name=name
),
"primary": sa_schema.PrimaryKeyConstraint,
"unique": sa_schema.UniqueConstraint,
"check": lambda name: sa_schema.CheckConstraint("", name=name),
None: sa_schema.Constraint,
}
try:
const = types[type_]
except KeyError as ke:
raise TypeError(
"'type' can be one of %s"
% ", ".join(sorted(repr(x) for x in types))
) from ke
else:
const = const(name=name)
t.append_constraint(const)
return const
def metadata(self) -> MetaData:
kw = {}
if (
self.migration_context is not None
and "target_metadata" in self.migration_context.opts
):
mt = self.migration_context.opts["target_metadata"]
if hasattr(mt, "naming_convention"):
kw["naming_convention"] = mt.naming_convention
return sa_schema.MetaData(**kw)
def table(self, name: str, *columns, **kw) -> Table:
m = self.metadata()
cols = [
sqla_compat._copy(c) if c.table is not None else c
for c in columns
if isinstance(c, Column)
]
# these flags have already added their UniqueConstraint /
# Index objects to the table, so flip them off here.
# SQLAlchemy tometadata() avoids this instead by preserving the
# flags and skipping the constraints that have _type_bound on them,
# but for a migration we'd rather list out the constraints
# explicitly.
_constraints_included = kw.pop("_constraints_included", False)
if _constraints_included:
for c in cols:
c.unique = c.index = False
t = sa_schema.Table(name, m, *cols, **kw)
constraints = [
(
sqla_compat._copy(elem, target_table=t)
if getattr(elem, "parent", None) is not t
and getattr(elem, "parent", None) is not None
else elem
)
for elem in columns
if isinstance(elem, (Constraint, Index))
]
for const in constraints:
t.append_constraint(const)
for f in t.foreign_keys:
self._ensure_table_for_fk(m, f)
return t
def column(self, name: str, type_: TypeEngine, **kw) -> Column:
return sa_schema.Column(name, type_, **kw)
def index(
self,
name: Optional[str],
tablename: Optional[str],
columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
schema: Optional[str] = None,
**kw,
) -> Index:
t = sa_schema.Table(
tablename or "no_table",
self.metadata(),
schema=schema,
)
kw["_table"] = t
idx = sa_schema.Index(
name,
*[util.sqla_compat._textual_index_column(t, n) for n in columns],
**kw,
)
return idx
def _parse_table_key(self, table_key: str) -> Tuple[Optional[str], str]:
if "." in table_key:
tokens = table_key.split(".")
sname: Optional[str] = ".".join(tokens[0:-1])
tname = tokens[-1]
else:
tname = table_key
sname = None
return (sname, tname)
def _ensure_table_for_fk(self, metadata: MetaData, fk: ForeignKey) -> None:
"""create a placeholder Table object for the referent of a
ForeignKey.
"""
if isinstance(fk._colspec, str):
table_key, cname = fk._colspec.rsplit(".", 1)
sname, tname = self._parse_table_key(table_key)
if table_key not in metadata.tables:
rel_t = sa_schema.Table(tname, metadata, schema=sname)
else:
rel_t = metadata.tables[table_key]
if cname not in rel_t.c:
rel_t.append_column(sa_schema.Column(cname, NULLTYPE))
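if __name__ == "__main__":
    # Hedged demo, not part of the module above: SchemaObjects builds
    # throwaway Table/Constraint constructs so that operations can hand
    # fully formed schema objects to the impl layer. Names are illustrative.
    so = SchemaObjects()
    uq = so.unique_constraint("uq_user_email", "user", ["email"])
    print(uq.name, [c.name for c in uq.columns])  # uq_user_email ['email']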
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from typing import TYPE_CHECKING
from sqlalchemy import schema as sa_schema
from . import ops
from .base import Operations
from ..util.sqla_compat import _copy
if TYPE_CHECKING:
from sqlalchemy.sql.schema import Table
@Operations.implementation_for(ops.AlterColumnOp)
def alter_column(
operations: "Operations", operation: "ops.AlterColumnOp"
) -> None:
compiler = operations.impl.dialect.statement_compiler(
operations.impl.dialect, None
)
existing_type = operation.existing_type
existing_nullable = operation.existing_nullable
existing_server_default = operation.existing_server_default
type_ = operation.modify_type
column_name = operation.column_name
table_name = operation.table_name
schema = operation.schema
server_default = operation.modify_server_default
new_column_name = operation.modify_name
nullable = operation.modify_nullable
comment = operation.modify_comment
existing_comment = operation.existing_comment
def _count_constraint(constraint):
return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and (
not constraint._create_rule or constraint._create_rule(compiler)
)
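    # A type change can invalidate type-bound constraints (e.g. the CHECK
    # emitted for Boolean/Enum on backends lacking a native type), so any
    # such constraints tied to the old type are dropped first, then
    # re-created against the new type after the ALTER.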
if existing_type and type_:
t = operations.schema_obj.table(
table_name,
sa_schema.Column(column_name, existing_type),
schema=schema,
)
for constraint in t.constraints:
if _count_constraint(constraint):
operations.impl.drop_constraint(constraint)
operations.impl.alter_column(
table_name,
column_name,
nullable=nullable,
server_default=server_default,
name=new_column_name,
type_=type_,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
comment=comment,
existing_comment=existing_comment,
**operation.kw,
)
if type_:
t = operations.schema_obj.table(
table_name,
operations.schema_obj.column(column_name, type_),
schema=schema,
)
for constraint in t.constraints:
if _count_constraint(constraint):
operations.impl.add_constraint(constraint)
@Operations.implementation_for(ops.DropTableOp)
def drop_table(operations: "Operations", operation: "ops.DropTableOp") -> None:
kw = {}
if operation.if_exists is not None:
kw["if_exists"] = operation.if_exists
operations.impl.drop_table(
operation.to_table(operations.migration_context), **kw
)
@Operations.implementation_for(ops.DropColumnOp)
def drop_column(
operations: "Operations", operation: "ops.DropColumnOp"
) -> None:
column = operation.to_column(operations.migration_context)
operations.impl.drop_column(
operation.table_name, column, schema=operation.schema, **operation.kw
)
@Operations.implementation_for(ops.CreateIndexOp)
def create_index(
operations: "Operations", operation: "ops.CreateIndexOp"
) -> None:
idx = operation.to_index(operations.migration_context)
kw = {}
if operation.if_not_exists is not None:
kw["if_not_exists"] = operation.if_not_exists
operations.impl.create_index(idx, **kw)
@Operations.implementation_for(ops.DropIndexOp)
def drop_index(operations: "Operations", operation: "ops.DropIndexOp") -> None:
kw = {}
if operation.if_exists is not None:
kw["if_exists"] = operation.if_exists
operations.impl.drop_index(
operation.to_index(operations.migration_context),
**kw,
)
@Operations.implementation_for(ops.CreateTableOp)
def create_table(
operations: "Operations", operation: "ops.CreateTableOp"
) -> "Table":
kw = {}
if operation.if_not_exists is not None:
kw["if_not_exists"] = operation.if_not_exists
table = operation.to_table(operations.migration_context)
operations.impl.create_table(table, **kw)
return table
@Operations.implementation_for(ops.RenameTableOp)
def rename_table(
operations: "Operations", operation: "ops.RenameTableOp"
) -> None:
operations.impl.rename_table(
operation.table_name, operation.new_table_name, schema=operation.schema
)
@Operations.implementation_for(ops.CreateTableCommentOp)
def create_table_comment(
operations: "Operations", operation: "ops.CreateTableCommentOp"
) -> None:
table = operation.to_table(operations.migration_context)
operations.impl.create_table_comment(table)
@Operations.implementation_for(ops.DropTableCommentOp)
def drop_table_comment(
operations: "Operations", operation: "ops.DropTableCommentOp"
) -> None:
table = operation.to_table(operations.migration_context)
operations.impl.drop_table_comment(table)
@Operations.implementation_for(ops.AddColumnOp)
def add_column(operations: "Operations", operation: "ops.AddColumnOp") -> None:
table_name = operation.table_name
column = operation.column
schema = operation.schema
kw = operation.kw
if column.table is not None:
column = _copy(column)
t = operations.schema_obj.table(table_name, column, schema=schema)
operations.impl.add_column(table_name, column, schema=schema, **kw)
for constraint in t.constraints:
if not isinstance(constraint, sa_schema.PrimaryKeyConstraint):
operations.impl.add_constraint(constraint)
for index in t.indexes:
operations.impl.create_index(index)
with_comment = (
operations.impl.dialect.supports_comments
and not operations.impl.dialect.inline_comments
)
comment = column.comment
if comment and with_comment:
operations.impl.create_column_comment(column)
@Operations.implementation_for(ops.AddConstraintOp)
def create_constraint(
operations: "Operations", operation: "ops.AddConstraintOp"
) -> None:
operations.impl.add_constraint(
operation.to_constraint(operations.migration_context)
)
@Operations.implementation_for(ops.DropConstraintOp)
def drop_constraint(
operations: "Operations", operation: "ops.DropConstraintOp"
) -> None:
operations.impl.drop_constraint(
operations.schema_obj.generic_constraint(
operation.constraint_name,
operation.table_name,
operation.constraint_type,
schema=operation.schema,
)
)
@Operations.implementation_for(ops.BulkInsertOp)
def bulk_insert(
operations: "Operations", operation: "ops.BulkInsertOp"
) -> None:
operations.impl.bulk_insert( # type: ignore[union-attr]
operation.table, operation.rows, multiinsert=operation.multiinsert
)
@Operations.implementation_for(ops.ExecuteSQLOp)
def execute_sql(
operations: "Operations", operation: "ops.ExecuteSQLOp"
) -> None:
operations.migration_context.impl.execute(
operation.sqltext, execution_options=operation.execution_options
)
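if __name__ == "__main__":
    # Hedged demo, not part of the module above: the same
    # Operations.implementation_for dispatch used throughout this module
    # also serves user-defined operations. GrantSelectOp is hypothetical.
    class GrantSelectOp(ops.MigrateOperation):
        def __init__(self, role: str) -> None:
            self.role = role

    @Operations.implementation_for(GrantSelectOp)
    def grant_select(
        operations: "Operations", operation: "GrantSelectOp"
    ) -> None:
        operations.execute(
            "GRANT SELECT ON ALL TABLES IN SCHEMA public TO %s"
            % operation.role
        )

    print("GrantSelectOp implementation registered")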
|
from . import toimpl
from .base import AbstractOperations
from .base import BatchOperations
from .base import Operations
from .ops import MigrateOperation
from .ops import MigrationScript
__all__ = [
"AbstractOperations",
"Operations",
"BatchOperations",
"MigrateOperation",
"MigrationScript",
]
|
from __future__ import annotations
from typing import Any
from typing import Callable
from typing import Collection
from typing import Dict
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import Optional
from typing import overload
from typing import Sequence
from typing import TextIO
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import FetchedValue
from typing_extensions import ContextManager
from typing_extensions import Literal
from .migration import _ProxyTransaction
from .migration import MigrationContext
from .. import util
from ..operations import Operations
from ..script.revision import _GetRevArg
if TYPE_CHECKING:
from sqlalchemy.engine import URL
from sqlalchemy.engine.base import Connection
from sqlalchemy.sql import Executable
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import SchemaItem
from sqlalchemy.sql.type_api import TypeEngine
from .migration import MigrationInfo
from ..autogenerate.api import AutogenContext
from ..config import Config
from ..ddl import DefaultImpl
from ..operations.ops import MigrationScript
from ..script.base import ScriptDirectory
_RevNumber = Optional[Union[str, Tuple[str, ...]]]
ProcessRevisionDirectiveFn = Callable[
[MigrationContext, _GetRevArg, List["MigrationScript"]], None
]
RenderItemFn = Callable[
[str, Any, "AutogenContext"], Union[str, Literal[False]]
]
NameFilterType = Literal[
"schema",
"table",
"column",
"index",
"unique_constraint",
"foreign_key_constraint",
]
NameFilterParentNames = MutableMapping[
Literal["schema_name", "table_name", "schema_qualified_table_name"],
Optional[str],
]
IncludeNameFn = Callable[
[Optional[str], NameFilterType, NameFilterParentNames], bool
]
IncludeObjectFn = Callable[
[
"SchemaItem",
Optional[str],
NameFilterType,
bool,
Optional["SchemaItem"],
],
bool,
]
OnVersionApplyFn = Callable[
[MigrationContext, "MigrationInfo", Collection[Any], Mapping[str, Any]],
None,
]
CompareServerDefault = Callable[
[
MigrationContext,
"Column[Any]",
"Column[Any]",
Optional[str],
Optional[FetchedValue],
Optional[str],
],
Optional[bool],
]
CompareType = Callable[
[
MigrationContext,
"Column[Any]",
"Column[Any]",
"TypeEngine[Any]",
"TypeEngine[Any]",
],
Optional[bool],
]
class EnvironmentContext(util.ModuleClsProxy):
"""A configurational facade made available in an ``env.py`` script.
The :class:`.EnvironmentContext` acts as a *facade* to the more
nuts-and-bolts objects of :class:`.MigrationContext` as well as certain
aspects of :class:`.Config`,
within the context of the ``env.py`` script that is invoked by
most Alembic commands.
:class:`.EnvironmentContext` is normally instantiated
when a command in :mod:`alembic.command` is run. It then makes
itself available in the ``alembic.context`` module for the scope
of the command. From within an ``env.py`` script, the current
:class:`.EnvironmentContext` is available by importing this module.
:class:`.EnvironmentContext` also supports programmatic usage.
    At this level, it acts as a Python context manager, that is, it is
    intended to be used with the
    ``with:`` statement. A typical use of :class:`.EnvironmentContext`::
from alembic.config import Config
from alembic.script import ScriptDirectory
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
def my_function(rev, context):
'''do something with revision "rev", which
will be the current database revision,
and "context", which is the MigrationContext
that the env.py will create'''
with EnvironmentContext(
config,
script,
fn=my_function,
as_sql=False,
starting_rev="base",
destination_rev="head",
tag="sometag",
):
script.run_env()
The above script will invoke the ``env.py`` script
within the migration environment. If and when ``env.py``
calls :meth:`.MigrationContext.run_migrations`, the
``my_function()`` function above will be called
by the :class:`.MigrationContext`, given the context
itself as well as the current revision in the database.
.. note::
For most API usages other than full blown
invocation of migration scripts, the :class:`.MigrationContext`
and :class:`.ScriptDirectory` objects can be created and
used directly. The :class:`.EnvironmentContext` object
is *only* needed when you need to actually invoke the
``env.py`` module present in the migration environment.
"""
_migration_context: Optional[MigrationContext] = None
config: Config = None # type:ignore[assignment]
"""An instance of :class:`.Config` representing the
configuration file contents as well as other variables
set programmatically within it."""
script: ScriptDirectory = None # type:ignore[assignment]
"""An instance of :class:`.ScriptDirectory` which provides
programmatic access to version files within the ``versions/``
directory.
"""
def __init__(
self, config: Config, script: ScriptDirectory, **kw: Any
) -> None:
r"""Construct a new :class:`.EnvironmentContext`.
:param config: a :class:`.Config` instance.
:param script: a :class:`.ScriptDirectory` instance.
:param \**kw: keyword options that will be ultimately
passed along to the :class:`.MigrationContext` when
:meth:`.EnvironmentContext.configure` is called.
"""
self.config = config
self.script = script
self.context_opts = kw
def __enter__(self) -> EnvironmentContext:
"""Establish a context which provides a
:class:`.EnvironmentContext` object to
env.py scripts.
The :class:`.EnvironmentContext` will
be made available as ``from alembic import context``.
"""
self._install_proxy()
return self
def __exit__(self, *arg: Any, **kw: Any) -> None:
self._remove_proxy()
def is_offline_mode(self) -> bool:
"""Return True if the current migrations environment
is running in "offline mode".
This is ``True`` or ``False`` depending
on the ``--sql`` flag passed.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.context_opts.get("as_sql", False) # type: ignore[no-any-return] # noqa: E501
def is_transactional_ddl(self) -> bool:
"""Return True if the context is configured to expect a
transactional DDL capable backend.
This defaults to the type of database in use, and
can be overridden by the ``transactional_ddl`` argument
to :meth:`.configure`
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().impl.transactional_ddl
def requires_connection(self) -> bool:
return not self.is_offline_mode()
def get_head_revision(self) -> _RevNumber:
"""Return the hex identifier of the 'head' script revision.
If the script directory has multiple heads, this
method raises a :class:`.CommandError`;
:meth:`.EnvironmentContext.get_head_revisions` should be preferred.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
"""
return self.script.as_revision_number("head")
def get_head_revisions(self) -> _RevNumber:
"""Return the hex identifier of the 'heads' script revision(s).
This returns a tuple containing the version number of all
heads in the script directory.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script.as_revision_number("heads")
def get_starting_revision_argument(self) -> _RevNumber:
"""Return the 'starting revision' argument,
if the revision was passed using ``start:end``.
This is only meaningful in "offline" mode.
Returns ``None`` if no value is available
or was configured.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
if self._migration_context is not None:
return self.script.as_revision_number(
self.get_context()._start_from_rev
)
elif "starting_rev" in self.context_opts:
return self.script.as_revision_number(
self.context_opts["starting_rev"]
)
else:
# this should raise only in the case that a command
# is being run where the "starting rev" is never applicable;
# this is to catch scripts which rely upon this in
# non-sql mode or similar
raise util.CommandError(
"No starting revision argument is available."
)
def get_revision_argument(self) -> _RevNumber:
"""Get the 'destination' revision argument.
This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script.as_revision_number(
self.context_opts["destination_rev"]
)
def get_tag_argument(self) -> Optional[str]:
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument` - a newer and more
open ended system of extending ``env.py`` scripts via the command
line.
"""
return self.context_opts.get("tag", None) # type: ignore[no-any-return] # noqa: E501
@overload
def get_x_argument(self, as_dictionary: Literal[False]) -> List[str]: ...
@overload
def get_x_argument(
self, as_dictionary: Literal[True]
) -> Dict[str, str]: ...
@overload
def get_x_argument(
self, as_dictionary: bool = ...
) -> Union[List[str], Dict[str, str]]: ...
def get_x_argument(
self, as_dictionary: bool = False
) -> Union[List[str], Dict[str, str]]:
"""Return the value(s) passed for the ``-x`` argument, if any.
The ``-x`` argument is an open ended flag that allows any user-defined
value or values to be passed on the command line, then available
here for consumption by a custom ``env.py`` script.
The return value is a list, returned directly from the ``argparse``
structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
are parsed using ``key=value`` format into a dictionary that is
        then returned. If an argument contains no ``=``, its value is an
        empty string.
.. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when
arguments are passed without the ``=`` symbol.
For example, to support passing a database URL on the command line,
the standard ``env.py`` script can be modified like this::
cmd_line_url = context.get_x_argument(
as_dictionary=True).get('dbname')
if cmd_line_url:
engine = create_engine(cmd_line_url)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
This then takes effect by running the ``alembic`` script as::
alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_tag_argument`
:attr:`.Config.cmd_opts`
"""
if self.config.cmd_opts is not None:
value = self.config.cmd_opts.x or []
else:
value = []
if as_dictionary:
dict_value = {}
for arg in value:
x_key, _, x_value = arg.partition("=")
dict_value[x_key] = x_value
value = dict_value
return value
def configure(
self,
connection: Optional[Connection] = None,
url: Optional[Union[str, URL]] = None,
dialect_name: Optional[str] = None,
dialect_opts: Optional[Dict[str, Any]] = None,
transactional_ddl: Optional[bool] = None,
transaction_per_migration: bool = False,
output_buffer: Optional[TextIO] = None,
starting_rev: Optional[str] = None,
tag: Optional[str] = None,
template_args: Optional[Dict[str, Any]] = None,
render_as_batch: bool = False,
target_metadata: Union[MetaData, Sequence[MetaData], None] = None,
include_name: Optional[IncludeNameFn] = None,
include_object: Optional[IncludeObjectFn] = None,
include_schemas: bool = False,
process_revision_directives: Optional[
ProcessRevisionDirectiveFn
] = None,
compare_type: Union[bool, CompareType] = True,
compare_server_default: Union[bool, CompareServerDefault] = False,
render_item: Optional[RenderItemFn] = None,
literal_binds: bool = False,
upgrade_token: str = "upgrades",
downgrade_token: str = "downgrades",
alembic_module_prefix: str = "op.",
sqlalchemy_module_prefix: str = "sa.",
user_module_prefix: Optional[str] = None,
on_version_apply: Optional[OnVersionApplyFn] = None,
**kw: Any,
) -> None:
"""Configure a :class:`.MigrationContext` within this
:class:`.EnvironmentContext` which will provide database
connectivity and other configuration to a series of
migration scripts.
Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
The important thing needed by :meth:`.configure` is a
means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
multiple times for an invocation. The most recent
:class:`~sqlalchemy.engine.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
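        E.g., a minimal online-mode sketch as it might appear in
        ``env.py`` (engine acquisition elided; ``connectable`` and
        ``target_metadata`` follow the standard template naming)::

            with connectable.connect() as connection:
                context.configure(
                    connection=connection,
                    target_metadata=target_metadata,
                )
                with context.begin_transaction():
                    context.run_migrations()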
General parameters:
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param dialect_opts: dictionary of options to be passed to dialect
constructor.
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
        :param transaction_per_migration: if True, run each migration
         script in its own transaction, rather than wrapping the full
         series of migrations to run in a single transaction.
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param output_encoding: when using ``--sql`` to generate SQL
scripts, apply this encoding to the string output.
:param literal_binds: when using ``--sql`` to generate SQL
scripts, pass through the ``literal_binds`` flag to the compiler
so that any literal values that would ordinarily be bound
parameters are converted to plain strings.
.. warning:: Dialects can typically only handle simple datatypes
like strings and numbers for auto-literal generation. Datatypes
like dates, intervals, and others may still require manual
formatting, typically using :meth:`.Operations.inline_literal`.
.. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
versions prior to 0.8 where this feature is not supported.
.. seealso::
:meth:`.Operations.inline_literal`
:param starting_rev: Override the "starting revision" argument
when using ``--sql`` mode.
:param tag: a string tag for usage by custom ``env.py`` scripts.
Set via the ``--tag`` option, can be overridden here.
:param template_args: dictionary of template arguments which
will be added to the template argument environment when
running the "revision" command. Note that the script environment
is only run within the "revision" command if the --autogenerate
option is used, or if the option "revision_environment=true"
is present in the alembic.ini file.
:param version_table: The name of the Alembic version table.
The default is ``'alembic_version'``.
:param version_table_schema: Optional schema to place version
table within.
:param version_table_pk: boolean, whether the Alembic version table
should use a primary key constraint for the "value" column; this
only takes effect when the table is first created.
Defaults to True; setting to False should not be necessary and is
here for backwards compatibility reasons.
:param on_version_apply: a callable or collection of callables to be
run for each migration step.
The callables will be run in the order they are given, once for
each migration step, after the respective operation has been
applied but before its transaction is finalized.
Each callable accepts no positional arguments and the following
keyword arguments:
* ``ctx``: the :class:`.MigrationContext` running the migration,
* ``step``: a :class:`.MigrationInfo` representing the
step currently being applied,
* ``heads``: a collection of version strings representing the
current heads,
* ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
Parameters specific to the autogenerate feature, when
``alembic revision`` is run with the ``--autogenerate`` feature:
:param target_metadata: a :class:`sqlalchemy.schema.MetaData`
object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
objects, that will be consulted during autogeneration.
The tables present in each :class:`~sqlalchemy.schema.MetaData`
will be compared against
what is locally available on the target
:class:`~sqlalchemy.engine.Connection`
to produce candidate upgrade/downgrade operations.
:param compare_type: Indicates type comparison behavior during
an autogenerate
operation. Defaults to ``True`` turning on type comparison, which
has good accuracy on most backends. See :ref:`compare_types`
for an example as well as information on other type
comparison options. Set to ``False`` which disables type
comparison. A callable can also be passed to provide custom type
comparison, see :ref:`compare_types` for additional details.
.. versionchanged:: 1.12.0 The default value of
:paramref:`.EnvironmentContext.configure.compare_type` has been
changed to ``True``.
.. seealso::
:ref:`compare_types`
:paramref:`.EnvironmentContext.configure.compare_server_default`
:param compare_server_default: Indicates server default comparison
behavior during
an autogenerate operation. Defaults to ``False`` which disables
server default
comparison. Set to ``True`` to turn on server default comparison,
which has
varied accuracy depending on backend.
         To customize server default comparison behavior, a callable may
         be specified which can filter server default comparisons during
         an autogenerate operation. The format of this
         callable is::
def my_compare_server_default(context, inspected_column,
metadata_column, inspected_default, metadata_default,
rendered_metadata_default):
# return True if the defaults are different,
# False if not, or None to allow the default implementation
# to compare these defaults
return None
context.configure(
# ...
compare_server_default = my_compare_server_default
)
``inspected_column`` is a dictionary structure as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default server default
comparison
        to proceed. Note that some backends such as PostgreSQL actually
execute
the two defaults on the database side to compare for equivalence.
.. seealso::
:paramref:`.EnvironmentContext.configure.compare_type`
:param include_name: A callable function which is given
the chance to return ``True`` or ``False`` for any database reflected
object based on its name, including database schema names when
the :paramref:`.EnvironmentContext.configure.include_schemas` flag
is set to ``True``.
The function accepts the following positional arguments:
* ``name``: the name of the object, such as schema name or table name.
Will be ``None`` when indicating the default schema name of the
database connection.
* ``type``: a string describing the type of object; currently
``"schema"``, ``"table"``, ``"column"``, ``"index"``,
``"unique_constraint"``, or ``"foreign_key_constraint"``
* ``parent_names``: a dictionary of "parent" object names, that are
relative to the name being given. Keys in this dictionary may
include: ``"schema_name"``, ``"table_name"`` or
``"schema_qualified_table_name"``.
E.g.::
def include_name(name, type_, parent_names):
if type_ == "schema":
return name in ["schema_one", "schema_two"]
else:
return True
context.configure(
# ...
include_schemas = True,
include_name = include_name
)
.. seealso::
:ref:`autogenerate_include_hooks`
:paramref:`.EnvironmentContext.configure.include_object`
:paramref:`.EnvironmentContext.configure.include_schemas`
:param include_object: A callable function which is given
the chance to return ``True`` or ``False`` for any object,
indicating if the given object should be considered in the
autogenerate sweep.
The function accepts the following positional arguments:
* ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
as a :class:`~sqlalchemy.schema.Table`,
:class:`~sqlalchemy.schema.Column`,
:class:`~sqlalchemy.schema.Index`
:class:`~sqlalchemy.schema.UniqueConstraint`,
or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
* ``name``: the name of the object. This is typically available
via ``object.name``.
* ``type``: a string describing the type of object; currently
``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
or ``"foreign_key_constraint"``
* ``reflected``: ``True`` if the given object was produced based on
table reflection, ``False`` if it's from a local :class:`.MetaData`
object.
* ``compare_to``: the object being compared against, if available,
else ``None``.
E.g.::
def include_object(object, name, type_, reflected, compare_to):
if (type_ == "column" and
not reflected and
object.info.get("skip_autogenerate", False)):
return False
else:
return True
context.configure(
# ...
include_object = include_object
)
For the use case of omitting specific schemas from a target database
when :paramref:`.EnvironmentContext.configure.include_schemas` is
set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema`
attribute can be checked for each :class:`~sqlalchemy.schema.Table`
object passed to the hook, however it is much more efficient
to filter on schemas before reflection of objects takes place
using the :paramref:`.EnvironmentContext.configure.include_name`
hook.
.. seealso::
:ref:`autogenerate_include_hooks`
:paramref:`.EnvironmentContext.configure.include_name`
:paramref:`.EnvironmentContext.configure.include_schemas`
:param render_as_batch: if True, commands which alter elements
within a table will be placed under a ``with batch_alter_table():``
directive, so that batch migrations will take place.
.. seealso::
:ref:`batch_migrations`
:param include_schemas: If True, autogenerate will scan across
all schemas located by the SQLAlchemy
:meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
method, and include all differences in tables found across all
those schemas. When using this option, you may want to also
use the :paramref:`.EnvironmentContext.configure.include_name`
parameter to specify a callable which
can filter the tables/schemas that get included.
.. seealso::
:ref:`autogenerate_include_hooks`
:paramref:`.EnvironmentContext.configure.include_name`
:paramref:`.EnvironmentContext.configure.include_object`
:param render_item: Callable that can be used to override how
any schema item, i.e. column, constraint, type,
etc., is rendered for autogenerate. The callable receives a
string describing the type of object, the object, and
the autogen context. If it returns False, the
default rendering method will be used. If it returns None,
the item will not be rendered in the context of a Table
construct, that is, can be used to skip columns or constraints
within op.create_table()::
def my_render_column(type_, col, autogen_context):
if type_ == "column" and isinstance(col, MySpecialCol):
return repr(col)
else:
return False
context.configure(
# ...
render_item = my_render_column
)
Available values for the type string include: ``"column"``,
``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
``"type"``, ``"server_default"``.
.. seealso::
:ref:`autogen_render_types`
:param upgrade_token: When autogenerate completes, the text of the
candidate upgrade operations will be present in this template
variable when ``script.py.mako`` is rendered. Defaults to
``upgrades``.
:param downgrade_token: When autogenerate completes, the text of the
candidate downgrade operations will be present in this
template variable when ``script.py.mako`` is rendered. Defaults to
``downgrades``.
:param alembic_module_prefix: When autogenerate refers to Alembic
:mod:`alembic.operations` constructs, this prefix will be used
(i.e. ``op.create_table``) Defaults to "``op.``".
Can be ``None`` to indicate no prefix.
:param sqlalchemy_module_prefix: When autogenerate refers to
SQLAlchemy
:class:`~sqlalchemy.schema.Column` or type classes, this prefix
will be used
(i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``".
Can be ``None`` to indicate no prefix.
Note that when dialect-specific types are rendered, autogenerate
will render them using the dialect module name, i.e. ``mssql.BIT()``,
``postgresql.UUID()``.
:param user_module_prefix: When autogenerate refers to a SQLAlchemy
type (e.g. :class:`.TypeEngine`) where the module name is not
under the ``sqlalchemy`` namespace, this prefix will be used
within autogenerate. If left at its default of
``None``, the ``__module__`` attribute of the type is used to
render the import module. It's a good practice to set this
and to have all custom types be available from a fixed module space,
in order to future-proof migration files against reorganizations
in modules.
.. seealso::
:ref:`autogen_module_prefix`
:param process_revision_directives: a callable function that will
be passed a structure representing the end result of an autogenerate
or plain "revision" operation, which can be manipulated to affect
how the ``alembic revision`` command ultimately outputs new
revision scripts. The structure of the callable is::
def process_revision_directives(context, revision, directives):
pass
The ``directives`` parameter is a Python list containing
a single :class:`.MigrationScript` directive, which represents
the revision file to be generated. This list as well as its
contents may be freely modified to produce any set of commands.
The section :ref:`customizing_revision` shows an example of
doing this. The ``context`` parameter is the
:class:`.MigrationContext` in use,
and ``revision`` is a tuple of revision identifiers representing the
current revision of the database.
The callable is invoked at all times when the ``--autogenerate``
option is passed to ``alembic revision``. If ``--autogenerate``
is not passed, the callable is invoked only if the
``revision_environment`` variable is set to True in the Alembic
configuration, in which case the given ``directives`` collection
will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
collections for ``.upgrade_ops`` and ``.downgrade_ops``. The
``--autogenerate`` option itself can be inferred by inspecting
``context.config.cmd_opts.autogenerate``.
The callable function may optionally be an instance of
a :class:`.Rewriter` object. This is a helper object that
assists in the production of autogenerate-stream rewriter functions.
.. seealso::
:ref:`customizing_revision`
:ref:`autogen_rewriter`
:paramref:`.command.revision.process_revision_directives`
Parameters specific to individual backends:
:param mssql_batch_separator: The "batch separator" which will
be placed between each statement when generating offline SQL Server
migrations. Defaults to ``GO``. Note this is in addition to the
customary semicolon ``;`` at the end of each statement; SQL Server
considers the "batch separator" to denote the end of an
individual statement execution, and cannot group certain
dependent operations in one step.
:param oracle_batch_separator: The "batch separator" which will
be placed between each statement when generating offline
         Oracle migrations. Defaults to ``/``. Unlike most other backends,
         Oracle doesn't use a semicolon between statements.
"""
opts = self.context_opts
if transactional_ddl is not None:
opts["transactional_ddl"] = transactional_ddl
if output_buffer is not None:
opts["output_buffer"] = output_buffer
elif self.config.output_buffer is not None:
opts["output_buffer"] = self.config.output_buffer
if starting_rev:
opts["starting_rev"] = starting_rev
if tag:
opts["tag"] = tag
if template_args and "template_args" in opts:
opts["template_args"].update(template_args)
opts["transaction_per_migration"] = transaction_per_migration
opts["target_metadata"] = target_metadata
opts["include_name"] = include_name
opts["include_object"] = include_object
opts["include_schemas"] = include_schemas
opts["render_as_batch"] = render_as_batch
opts["upgrade_token"] = upgrade_token
opts["downgrade_token"] = downgrade_token
opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix
opts["alembic_module_prefix"] = alembic_module_prefix
opts["user_module_prefix"] = user_module_prefix
opts["literal_binds"] = literal_binds
opts["process_revision_directives"] = process_revision_directives
opts["on_version_apply"] = util.to_tuple(on_version_apply, default=())
if render_item is not None:
opts["render_item"] = render_item
opts["compare_type"] = compare_type
if compare_server_default is not None:
opts["compare_server_default"] = compare_server_default
opts["script"] = self.script
opts.update(kw)
self._migration_context = MigrationContext.configure(
connection=connection,
url=url,
dialect_name=dialect_name,
environment_context=self,
dialect_opts=dialect_opts,
opts=opts,
)
def run_migrations(self, **kw: Any) -> None:
"""Run migrations as determined by the current command line
configuration
as well as versioning information present (or not) in the current
database connection (if one is present).
The function accepts optional ``**kw`` arguments. If these are
passed, they are sent directly to the ``upgrade()`` and
``downgrade()``
functions within each target revision file. By modifying the
``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
functions accept arguments, parameters can be passed here so that
contextual information, usually information to identify a particular
database in use, can be passed from a custom ``env.py`` script
to the migration functions.
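        E.g., assuming ``script.py.mako`` was edited so that migration
        functions accept an ``engine_name`` keyword (as in the "multidb"
        template)::

            context.run_migrations(engine_name="engine1")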
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
assert self._migration_context is not None
with Operations.context(self._migration_context):
self.get_context().run_migrations(**kw)
def execute(
self,
sql: Union[Executable, str],
execution_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Execute the given SQL using the current change context.
The behavior of :meth:`.execute` is the same
as that of :meth:`.Operations.execute`. Please see that
function's documentation for full detail including
caveats and limitations.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
self.get_context().execute(sql, execution_options=execution_options)
def static_output(self, text: str) -> None:
"""Emit text directly to the "offline" SQL stream.
Typically this is for emitting comments that
start with --. The statement is not treated
as a SQL execution, no ; or batch separator
is added, etc.
"""
self.get_context().impl.static_output(text)
def begin_transaction(
self,
) -> Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]:
"""Return a context manager that will
enclose an operation within a "transaction",
as defined by the environment's offline
and transactional DDL settings.
e.g.::
with context.begin_transaction():
context.run_migrations()
:meth:`.begin_transaction` is intended to
"do the right thing" regardless of
calling context:
* If :meth:`.is_transactional_ddl` is ``False``,
returns a "do nothing" context manager
which otherwise produces no transactional
state or directives.
* If :meth:`.is_offline_mode` is ``True``,
returns a context manager that will
invoke the :meth:`.DefaultImpl.emit_begin`
and :meth:`.DefaultImpl.emit_commit`
methods, which will produce the string
directives ``BEGIN`` and ``COMMIT`` on
the output stream, as rendered by the
target backend (e.g. SQL Server would
emit ``BEGIN TRANSACTION``).
* Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
on the current online connection, which
returns a :class:`sqlalchemy.engine.Transaction`
object. This object demarcates a real
transaction and is itself a context manager,
which will roll back if an exception
is raised.
Note that a custom ``env.py`` script which
has more specific transactional needs can of course
manipulate the :class:`~sqlalchemy.engine.Connection`
directly to produce transactional state in "online"
mode.
"""
return self.get_context().begin_transaction()
def get_context(self) -> MigrationContext:
"""Return the current :class:`.MigrationContext` object.
If :meth:`.EnvironmentContext.configure` has not been
called yet, raises an exception.
"""
if self._migration_context is None:
raise Exception("No context has been configured yet.")
return self._migration_context
def get_bind(self) -> Connection:
"""Return the current 'bind'.
In "online" mode, this is the
:class:`sqlalchemy.engine.Connection` currently being used
to emit SQL to the database.
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().bind # type: ignore[return-value]
def get_impl(self) -> DefaultImpl:
return self.get_context().impl
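if __name__ == "__main__":
    # Hedged demo, not part of the module above: replicate the key=value
    # parsing performed by get_x_argument(as_dictionary=True); an argument
    # without "=" maps to an empty-string value.
    raw = ["dbname=postgresql://user:pass@host/dbname", "verbose"]
    parsed = {key: val for key, _, val in (arg.partition("=") for arg in raw)}
    assert parsed == {
        "dbname": "postgresql://user:pass@host/dbname",
        "verbose": "",
    }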
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
from contextlib import contextmanager
from contextlib import nullcontext
import logging
import sys
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy import Column
from sqlalchemy import literal_column
from sqlalchemy import select
from sqlalchemy.engine import Engine
from sqlalchemy.engine import url as sqla_url
from sqlalchemy.engine.strategies import MockEngineStrategy
from typing_extensions import ContextManager
from .. import ddl
from .. import util
from ..util import sqla_compat
from ..util.compat import EncodedIO
if TYPE_CHECKING:
from sqlalchemy.engine import Dialect
from sqlalchemy.engine import URL
from sqlalchemy.engine.base import Connection
from sqlalchemy.engine.base import Transaction
from sqlalchemy.engine.mock import MockConnection
from sqlalchemy.sql import Executable
from .environment import EnvironmentContext
from ..config import Config
from ..script.base import Script
from ..script.base import ScriptDirectory
from ..script.revision import _RevisionOrBase
from ..script.revision import Revision
from ..script.revision import RevisionMap
log = logging.getLogger(__name__)
class _ProxyTransaction:
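    """Proxy a :class:`sqlalchemy.engine.Transaction` tracked by the
    :class:`.MigrationContext`, clearing the context's reference to it
    once the transaction ends."""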
def __init__(self, migration_context: MigrationContext) -> None:
self.migration_context = migration_context
@property
def _proxied_transaction(self) -> Optional[Transaction]:
return self.migration_context._transaction
def rollback(self) -> None:
t = self._proxied_transaction
assert t is not None
t.rollback()
self.migration_context._transaction = None
def commit(self) -> None:
t = self._proxied_transaction
assert t is not None
t.commit()
self.migration_context._transaction = None
def __enter__(self) -> _ProxyTransaction:
return self
def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
if self._proxied_transaction is not None:
self._proxied_transaction.__exit__(type_, value, traceback)
self.migration_context._transaction = None
class MigrationContext:
"""Represent the database state made available to a migration
script.
:class:`.MigrationContext` is the front end to an actual
database connection, or alternatively a string output
stream given a particular database dialect,
from an Alembic perspective.
When inside the ``env.py`` script, the :class:`.MigrationContext`
is available via the
:meth:`.EnvironmentContext.get_context` method,
which is available at ``alembic.context``::
# from within env.py script
from alembic import context
migration_context = context.get_context()
For usage outside of an ``env.py`` script, such as for
utility routines that want to check the current version
    in the database, the :meth:`.MigrationContext.configure`
    method may be used to create new :class:`.MigrationContext` objects.
For example, to get at the current revision in the
database using :meth:`.MigrationContext.get_current_revision`::
# in any application, outside of an env.py script
from alembic.migration import MigrationContext
from sqlalchemy import create_engine
engine = create_engine("postgresql://mydatabase")
conn = engine.connect()
context = MigrationContext.configure(conn)
current_rev = context.get_current_revision()
The above context can also be used to produce
Alembic migration operations with an :class:`.Operations`
instance::
# in any application, outside of the normal Alembic environment
from alembic.operations import Operations
op = Operations(context)
op.alter_column("mytable", "somecolumn", nullable=True)
"""
def __init__(
self,
dialect: Dialect,
connection: Optional[Connection],
opts: Dict[str, Any],
environment_context: Optional[EnvironmentContext] = None,
) -> None:
self.environment_context = environment_context
self.opts = opts
self.dialect = dialect
self.script: Optional[ScriptDirectory] = opts.get("script")
as_sql: bool = opts.get("as_sql", False)
transactional_ddl = opts.get("transactional_ddl")
self._transaction_per_migration = opts.get(
"transaction_per_migration", False
)
self.on_version_apply_callbacks = opts.get("on_version_apply", ())
self._transaction: Optional[Transaction] = None
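        # Offline (--sql) mode renders statements to the output buffer
        # through a mock "stdout connection" rather than executing them
        # against a live database.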
if as_sql:
self.connection = cast(
Optional["Connection"], self._stdout_connection(connection)
)
assert self.connection is not None
self._in_external_transaction = False
else:
self.connection = connection
self._in_external_transaction = (
sqla_compat._get_connection_in_transaction(connection)
)
self._migrations_fn: Optional[
Callable[..., Iterable[RevisionStep]]
] = opts.get("fn")
self.as_sql = as_sql
self.purge = opts.get("purge", False)
if "output_encoding" in opts:
self.output_buffer = EncodedIO(
opts.get("output_buffer")
or sys.stdout, # type:ignore[arg-type]
opts["output_encoding"],
)
else:
self.output_buffer = opts.get("output_buffer", sys.stdout)
self._user_compare_type = opts.get("compare_type", True)
self._user_compare_server_default = opts.get(
"compare_server_default", False
)
self.version_table = version_table = opts.get(
"version_table", "alembic_version"
)
self.version_table_schema = version_table_schema = opts.get(
"version_table_schema", None
)
self._start_from_rev: Optional[str] = opts.get("starting_rev")
self.impl = ddl.DefaultImpl.get_by_dialect(dialect)(
dialect,
self.connection,
self.as_sql,
transactional_ddl,
self.output_buffer,
opts,
)
self._version = self.impl.version_table_impl(
version_table=version_table,
version_table_schema=version_table_schema,
version_table_pk=opts.get("version_table_pk", True),
)
log.info("Context impl %s.", self.impl.__class__.__name__)
if self.as_sql:
log.info("Generating static SQL")
log.info(
"Will assume %s DDL.",
(
"transactional"
if self.impl.transactional_ddl
else "non-transactional"
),
)
@classmethod
def configure(
cls,
connection: Optional[Connection] = None,
url: Optional[Union[str, URL]] = None,
dialect_name: Optional[str] = None,
dialect: Optional[Dialect] = None,
environment_context: Optional[EnvironmentContext] = None,
dialect_opts: Optional[Dict[str, str]] = None,
opts: Optional[Any] = None,
) -> MigrationContext:
"""Create a new :class:`.MigrationContext`.
This is a factory method usually called
by :meth:`.EnvironmentContext.configure`.
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use for SQL execution in "online" mode. When present,
is also used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc. The type of dialect to be used will be
derived from this if ``connection`` and ``url`` are not passed.
:param opts: dictionary of options. Most other options
accepted by :meth:`.EnvironmentContext.configure` are passed via
this dictionary.
"""
if opts is None:
opts = {}
if dialect_opts is None:
dialect_opts = {}
if connection:
if isinstance(connection, Engine):
raise util.CommandError(
"'connection' argument to configure() is expected "
"to be a sqlalchemy.engine.Connection instance, "
"got %r" % connection,
)
dialect = connection.dialect
elif url:
url_obj = sqla_url.make_url(url)
dialect = url_obj.get_dialect()(**dialect_opts)
elif dialect_name:
url_obj = sqla_url.make_url("%s://" % dialect_name)
dialect = url_obj.get_dialect()(**dialect_opts)
elif not dialect:
raise Exception("Connection, url, or dialect_name is required.")
assert dialect is not None
return MigrationContext(dialect, connection, opts, environment_context)
@contextmanager
def autocommit_block(self) -> Iterator[None]:
"""Enter an "autocommit" block, for databases that support AUTOCOMMIT
isolation levels.
This special directive is intended to support the occasional database
DDL or system operation that specifically has to be run outside of
any kind of transaction block. The PostgreSQL database platform
is the most common target for this style of operation, as many
of its DDL operations must be run outside of transaction blocks, even
though the database overall supports transactional DDL.
The method is used as a context manager within a migration script, by
calling on :meth:`.Operations.get_context` to retrieve the
:class:`.MigrationContext`, then invoking
:meth:`.MigrationContext.autocommit_block` using the ``with:``
statement::
def upgrade():
with op.get_context().autocommit_block():
op.execute("ALTER TYPE mood ADD VALUE 'soso'")
Above, a PostgreSQL "ALTER TYPE..ADD VALUE" directive is emitted,
which must be run outside of a transaction block at the database level.
The :meth:`.MigrationContext.autocommit_block` method makes use of the
SQLAlchemy ``AUTOCOMMIT`` isolation level setting, which against the
psycopg2 DBAPI corresponds to the ``connection.autocommit`` setting,
to ensure that the database driver is not inside of a DBAPI level
transaction block.
.. warning::
As is necessary, **the database transaction preceding the block is
unconditionally committed**. This means that the run of migrations
preceding the operation will be committed, before the overall
migration operation is complete.
It is recommended that when an application includes migrations with
"autocommit" blocks, that
:paramref:`.EnvironmentContext.transaction_per_migration` be used
so that the calling environment is tuned to expect short per-file
migrations whether or not one of them has an autocommit block.
"""
_in_connection_transaction = self._in_connection_transaction()
if self.impl.transactional_ddl and self.as_sql:
self.impl.emit_commit()
elif _in_connection_transaction:
assert self._transaction is not None
self._transaction.commit()
self._transaction = None
if not self.as_sql:
assert self.connection is not None
current_level = self.connection.get_isolation_level()
base_connection = self.connection
# in 1.3 and 1.4 non-future mode, the connection gets switched
# out. we can use the base connection with the new mode
# except that it will not know it's in "autocommit" and will
# emit deprecation warnings when an autocommit action takes
# place.
self.connection = self.impl.connection = (
base_connection.execution_options(isolation_level="AUTOCOMMIT")
)
# sqlalchemy future mode will "autobegin" in any case, so take
# control of that "transaction" here
fake_trans: Optional[Transaction] = self.connection.begin()
else:
fake_trans = None
try:
yield
finally:
if not self.as_sql:
assert self.connection is not None
if fake_trans is not None:
fake_trans.commit()
self.connection.execution_options(
isolation_level=current_level
)
self.connection = self.impl.connection = base_connection
if self.impl.transactional_ddl and self.as_sql:
self.impl.emit_begin()
elif _in_connection_transaction:
assert self.connection is not None
self._transaction = self.connection.begin()
def begin_transaction(
self, _per_migration: bool = False
) -> Union[_ProxyTransaction, ContextManager[None, Optional[bool]]]:
"""Begin a logical transaction for migration operations.
This method is used within an ``env.py`` script to demarcate where
the outer "transaction" for a series of migrations begins. Example::
def run_migrations_online():
connectable = create_engine(...)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
Above, :meth:`.MigrationContext.begin_transaction` is used to demarcate
where the outer logical transaction occurs around the
:meth:`.MigrationContext.run_migrations` operation.
A "Logical" transaction means that the operation may or may not
correspond to a real database transaction. If the target database
supports transactional DDL (or
:paramref:`.EnvironmentContext.configure.transactional_ddl` is true),
the :paramref:`.EnvironmentContext.configure.transaction_per_migration`
flag is not set, and the migration is against a real database
connection (as opposed to using "offline" ``--sql`` mode), a real
transaction will be started. If ``--sql`` mode is in effect, the
operation would instead correspond to a string such as "BEGIN" being
emitted to the string output.
The returned object is a Python context manager that should only be
used in the context of a ``with:`` statement as indicated above.
The object has no other guaranteed API features present.
.. seealso::
:meth:`.MigrationContext.autocommit_block`
"""
if self._in_external_transaction:
return nullcontext()
if self.impl.transactional_ddl:
transaction_now = _per_migration == self._transaction_per_migration
else:
transaction_now = _per_migration is True
if not transaction_now:
return nullcontext()
elif not self.impl.transactional_ddl:
assert _per_migration
if self.as_sql:
return nullcontext()
else:
# track our own notion of a "transaction block", which must be
# committed when complete. Don't rely upon whether or not the
# SQLAlchemy connection reports as "in transaction"; this is
# because SQLAlchemy future connections feature autobegin
# behavior, so it may already be in a transaction from our
# emitting of queries like "has_version_table", etc. While we
# could track these operations as well, that leaves open the
# possibility of new operations or other things happening in
# the user environment that still may be triggering
# "autobegin".
in_transaction = self._transaction is not None
if in_transaction:
return nullcontext()
else:
assert self.connection is not None
self._transaction = (
sqla_compat._safe_begin_connection_transaction(
self.connection
)
)
return _ProxyTransaction(self)
elif self.as_sql:
@contextmanager
def begin_commit():
self.impl.emit_begin()
yield
self.impl.emit_commit()
return begin_commit()
else:
assert self.connection is not None
self._transaction = sqla_compat._safe_begin_connection_transaction(
self.connection
)
return _ProxyTransaction(self)
def get_current_revision(self) -> Optional[str]:
"""Return the current revision, usually that which is present
in the ``alembic_version`` table in the database.
This method is intended to be used only for a migration stream that
does not contain unmerged branches in the target database;
if there are multiple branches present, an exception is raised.
The :meth:`.MigrationContext.get_current_heads` should be preferred
over this method going forward in order to be compatible with
branch migration support.
If this :class:`.MigrationContext` was configured in "offline"
mode, that is with ``as_sql=True``, the ``starting_rev``
parameter is returned instead, if any.
"""
heads = self.get_current_heads()
if len(heads) == 0:
return None
elif len(heads) > 1:
raise util.CommandError(
"Version table '%s' has more than one head present; "
"please use get_current_heads()" % self.version_table
)
else:
return heads[0]
def get_current_heads(self) -> Tuple[str, ...]:
"""Return a tuple of the current 'head versions' that are represented
in the target database.
For a migration stream without branches, this will be a single
value, synonymous with that of
:meth:`.MigrationContext.get_current_revision`. However when multiple
unmerged branches exist within the target database, the returned tuple
will contain a value for each head.
If this :class:`.MigrationContext` was configured in "offline"
mode, that is with ``as_sql=True``, the ``starting_rev``
parameter is returned in a one-length tuple.
If no version table is present, or if there are no revisions
present, an empty tuple is returned.
"""
if self.as_sql:
start_from_rev: Any = self._start_from_rev
if start_from_rev == "base":
start_from_rev = None
elif start_from_rev is not None and self.script:
start_from_rev = [
self.script.get_revision(sfr).revision
for sfr in util.to_list(start_from_rev)
if sfr not in (None, "base")
]
return util.to_tuple(start_from_rev, default=())
else:
if self._start_from_rev:
raise util.CommandError(
"Can't specify current_rev to context "
"when using a database connection"
)
if not self._has_version_table():
return ()
assert self.connection is not None
return tuple(
row[0]
for row in self.connection.execute(
select(self._version.c.version_num)
)
)
def _ensure_version_table(self, purge: bool = False) -> None:
with sqla_compat._ensure_scope_for_ddl(self.connection):
assert self.connection is not None
self._version.create(self.connection, checkfirst=True)
if purge:
assert self.connection is not None
self.connection.execute(self._version.delete())
def _has_version_table(self) -> bool:
assert self.connection is not None
return sqla_compat._connectable_has_table(
self.connection, self.version_table, self.version_table_schema
)
def stamp(self, script_directory: ScriptDirectory, revision: str) -> None:
"""Stamp the version table with a specific revision.
This method calculates those branches to which the given revision
can apply, and updates those branches as though they were migrated
towards that revision (either up or down). If no current branches
include the revision, it is added as a new branch head.
"""
heads = self.get_current_heads()
if not self.as_sql and not heads:
self._ensure_version_table()
head_maintainer = HeadMaintainer(self, heads)
for step in script_directory._stamp_revs(revision, heads):
head_maintainer.update_to_step(step)
def run_migrations(self, **kw: Any) -> None:
r"""Run the migration scripts established for this
:class:`.MigrationContext`, if any.
The commands in :mod:`alembic.command` will set up a function
that is ultimately passed to the :class:`.MigrationContext`
as the ``fn`` argument. This function represents the "work"
that will be done when :meth:`.MigrationContext.run_migrations`
is called, typically from within the ``env.py`` script of the
migration environment. The "work function" then provides an iterable
of version callables and other version information which
in the case of the ``upgrade`` or ``downgrade`` commands are the
list of version scripts to invoke. Other commands, such as
``current`` or ``stamp``, yield nothing and instead run some other
operation against the database.
:param \**kw: keyword arguments here will be passed to each
migration callable, that is the ``upgrade()`` or ``downgrade()``
method within revision scripts.
"""
self.impl.start_migrations()
heads: Tuple[str, ...]
if self.purge:
if self.as_sql:
raise util.CommandError("Can't use --purge with --sql mode")
self._ensure_version_table(purge=True)
heads = ()
else:
heads = self.get_current_heads()
dont_mutate = self.opts.get("dont_mutate", False)
if not self.as_sql and not heads and not dont_mutate:
self._ensure_version_table()
head_maintainer = HeadMaintainer(self, heads)
assert self._migrations_fn is not None
for step in self._migrations_fn(heads, self):
with self.begin_transaction(_per_migration=True):
if self.as_sql and not head_maintainer.heads:
# for offline mode, include a CREATE TABLE from
# the base
assert self.connection is not None
self._version.create(self.connection)
log.info("Running %s", step)
if self.as_sql:
self.impl.static_output(
"-- Running %s" % (step.short_log,)
)
step.migration_fn(**kw)
# previously, we wouldn't stamp per migration
# if we were in a transaction, however given the more
# complex model that involves any number of inserts
# and row-targeted updates and deletes, it's simpler for now
# just to run the operations on every version
head_maintainer.update_to_step(step)
for callback in self.on_version_apply_callbacks:
callback(
ctx=self,
step=step.info,
heads=set(head_maintainer.heads),
run_args=kw,
)
if self.as_sql and not head_maintainer.heads:
assert self.connection is not None
self._version.drop(self.connection)
def _in_connection_transaction(self) -> bool:
try:
meth = self.connection.in_transaction # type:ignore[union-attr]
except AttributeError:
return False
else:
return meth()
def execute(
self,
sql: Union[Executable, str],
execution_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Execute a SQL construct or string statement.
The underlying execution mechanics are used, that is
if this is "offline mode" the SQL is written to the
output buffer, otherwise the SQL is emitted on
the current SQLAlchemy connection.
"""
self.impl._exec(sql, execution_options)
def _stdout_connection(
self, connection: Optional[Connection]
) -> MockConnection:
def dump(construct, *multiparams, **params):
self.impl._exec(construct)
return MockEngineStrategy.MockConnection(self.dialect, dump)
@property
def bind(self) -> Optional[Connection]:
"""Return the current "bind".
In online mode, this is an instance of
:class:`sqlalchemy.engine.Connection`, and is suitable
for ad-hoc execution of any kind of usage described
in SQLAlchemy Core documentation as well as
for usage with the :meth:`sqlalchemy.schema.Table.create`
and :meth:`sqlalchemy.schema.MetaData.create_all` methods
of :class:`~sqlalchemy.schema.Table`,
:class:`~sqlalchemy.schema.MetaData`.
Note that when "standard output" mode is enabled,
this bind will be a "mock" connection handler that cannot
return results and is only appropriate for a very limited
subset of commands.
"""
return self.connection
@property
def config(self) -> Optional[Config]:
"""Return the :class:`.Config` used by the current environment,
if any."""
if self.environment_context:
return self.environment_context.config
else:
return None
def _compare_type(
self, inspector_column: Column[Any], metadata_column: Column
) -> bool:
if self._user_compare_type is False:
return False
if callable(self._user_compare_type):
user_value = self._user_compare_type(
self,
inspector_column,
metadata_column,
inspector_column.type,
metadata_column.type,
)
if user_value is not None:
return user_value
return self.impl.compare_type(inspector_column, metadata_column)
def _compare_server_default(
self,
inspector_column: Column[Any],
metadata_column: Column[Any],
rendered_metadata_default: Optional[str],
rendered_column_default: Optional[str],
) -> bool:
if self._user_compare_server_default is False:
return False
if callable(self._user_compare_server_default):
user_value = self._user_compare_server_default(
self,
inspector_column,
metadata_column,
rendered_column_default,
metadata_column.server_default,
rendered_metadata_default,
)
if user_value is not None:
return user_value
return self.impl.compare_server_default(
inspector_column,
metadata_column,
rendered_metadata_default,
rendered_column_default,
)
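# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# A minimal sketch of obtaining a MigrationContext outside of env.py and
# passing a user-supplied ``compare_type`` callable (consulted by
# _compare_type above) through ``opts``.  Assumes a throwaway SQLite
# engine; names prefixed with _demo are hypothetical.
def _demo_migration_context() -> None:
    from sqlalchemy import create_engine

    def my_compare_type(
        context, inspected_column, metadata_column, inspected_type, metadata_type
    ):
        # Return True/False to force a decision, or None to defer to the
        # dialect-level implementation (impl.compare_type).
        return None

    engine = create_engine("sqlite://")
    with engine.connect() as connection:
        context = MigrationContext.configure(
            connection, opts={"compare_type": my_compare_type}
        )
        # () when no alembic_version table exists yet
        print(context.get_current_heads())

# _demo_migration_context()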
class HeadMaintainer:
def __init__(self, context: MigrationContext, heads: Any) -> None:
self.context = context
self.heads = set(heads)
def _insert_version(self, version: str) -> None:
assert version not in self.heads
self.heads.add(version)
self.context.impl._exec(
self.context._version.insert().values(
version_num=literal_column("'%s'" % version)
)
)
def _delete_version(self, version: str) -> None:
self.heads.remove(version)
ret = self.context.impl._exec(
self.context._version.delete().where(
self.context._version.c.version_num
== literal_column("'%s'" % version)
)
)
if (
not self.context.as_sql
and self.context.dialect.supports_sane_rowcount
and ret is not None
and ret.rowcount != 1
):
raise util.CommandError(
"Online migration expected to match one "
"row when deleting '%s' in '%s'; "
"%d found"
% (version, self.context.version_table, ret.rowcount)
)
def _update_version(self, from_: str, to_: str) -> None:
assert to_ not in self.heads
self.heads.remove(from_)
self.heads.add(to_)
ret = self.context.impl._exec(
self.context._version.update()
.values(version_num=literal_column("'%s'" % to_))
.where(
self.context._version.c.version_num
== literal_column("'%s'" % from_)
)
)
if (
not self.context.as_sql
and self.context.dialect.supports_sane_rowcount
and ret is not None
and ret.rowcount != 1
):
raise util.CommandError(
"Online migration expected to match one "
"row when updating '%s' to '%s' in '%s'; "
"%d found"
% (from_, to_, self.context.version_table, ret.rowcount)
)
def update_to_step(self, step: Union[RevisionStep, StampStep]) -> None:
if step.should_delete_branch(self.heads):
vers = step.delete_version_num
log.debug("branch delete %s", vers)
self._delete_version(vers)
elif step.should_create_branch(self.heads):
vers = step.insert_version_num
log.debug("new branch insert %s", vers)
self._insert_version(vers)
elif step.should_merge_branches(self.heads):
# delete revs, update from rev, update to rev
(
delete_revs,
update_from_rev,
update_to_rev,
) = step.merge_branch_idents(self.heads)
log.debug(
"merge, delete %s, update %s to %s",
delete_revs,
update_from_rev,
update_to_rev,
)
for delrev in delete_revs:
self._delete_version(delrev)
self._update_version(update_from_rev, update_to_rev)
elif step.should_unmerge_branches(self.heads):
(
update_from_rev,
update_to_rev,
insert_revs,
) = step.unmerge_branch_idents(self.heads)
log.debug(
"unmerge, insert %s, update %s to %s",
insert_revs,
update_from_rev,
update_to_rev,
)
for insrev in insert_revs:
self._insert_version(insrev)
self._update_version(update_from_rev, update_to_rev)
else:
from_, to_ = step.update_version_num(self.heads)
log.debug("update %s to %s", from_, to_)
self._update_version(from_, to_)
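# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# Renders the kind of statement _update_version() above emits against the
# version table, using a standalone Table equivalent to the default
# ``alembic_version`` table.  Nothing is executed; the statement is only
# printed for inspection.
def _demo_version_update_sql() -> None:
    from sqlalchemy import Column, MetaData, String, Table

    version = Table(
        "alembic_version",
        MetaData(),
        Column("version_num", String(32), nullable=False),
    )
    stmt = (
        version.update()
        .values(version_num=literal_column("'b2'"))
        .where(version.c.version_num == literal_column("'a1'"))
    )
    # UPDATE alembic_version SET version_num='b2'
    # WHERE alembic_version.version_num = 'a1'
    print(stmt)

# _demo_version_update_sql()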
class MigrationInfo:
"""Exposes information about a migration step to a callback listener.
The :class:`.MigrationInfo` object is available exclusively for the
benefit of the :paramref:`.EnvironmentContext.on_version_apply`
callback hook.
"""
is_upgrade: bool
"""True/False: indicates whether this operation ascends or descends the
version tree."""
is_stamp: bool
"""True/False: indicates whether this operation is a stamp (i.e. whether
it results in any actual database operations)."""
up_revision_id: Optional[str]
"""Version string corresponding to :attr:`.Revision.revision`.
In the case of a stamp operation, it is advised to use the
:attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can
make a single movement from one or more branches down to a single
branchpoint, in which case there will be multiple "up" revisions.
.. seealso::
:attr:`.MigrationInfo.up_revision_ids`
"""
up_revision_ids: Tuple[str, ...]
"""Tuple of version strings corresponding to :attr:`.Revision.revision`.
In the majority of cases, this tuple will be a single value, synonymous
with the scalar value of :attr:`.MigrationInfo.up_revision_id`.
It can be multiple revision identifiers only in the case of an
``alembic stamp`` operation which is moving downwards from multiple
branches down to their common branch point.
"""
down_revision_ids: Tuple[str, ...]
"""Tuple of strings representing the base revisions of this migration step.
If empty, this represents a root revision; otherwise, the first item
corresponds to :attr:`.Revision.down_revision`, and the rest are inferred
from dependencies.
"""
revision_map: RevisionMap
"""The revision map inside of which this operation occurs."""
def __init__(
self,
revision_map: RevisionMap,
is_upgrade: bool,
is_stamp: bool,
up_revisions: Union[str, Tuple[str, ...]],
down_revisions: Union[str, Tuple[str, ...]],
) -> None:
self.revision_map = revision_map
self.is_upgrade = is_upgrade
self.is_stamp = is_stamp
self.up_revision_ids = util.to_tuple(up_revisions, default=())
if self.up_revision_ids:
self.up_revision_id = self.up_revision_ids[0]
else:
# this should never be the case with
# "upgrade", "downgrade", or "stamp" as we are always
# measuring movement in terms of at least one upgrade version
self.up_revision_id = None
self.down_revision_ids = util.to_tuple(down_revisions, default=())
@property
def is_migration(self) -> bool:
"""True/False: indicates whether this operation is a migration.
At present this is true if and only if the migration is not a stamp.
If other operation types are added in the future, both this attribute
and :attr:`~.MigrationInfo.is_stamp` will be false.
"""
return not self.is_stamp
@property
def source_revision_ids(self) -> Tuple[str, ...]:
"""Active revisions before this migration step is applied."""
return (
self.down_revision_ids if self.is_upgrade else self.up_revision_ids
)
@property
def destination_revision_ids(self) -> Tuple[str, ...]:
"""Active revisions after this migration step is applied."""
return (
self.up_revision_ids if self.is_upgrade else self.down_revision_ids
)
@property
def up_revision(self) -> Optional[Revision]:
"""Get :attr:`~.MigrationInfo.up_revision_id` as
a :class:`.Revision`.
"""
return self.revision_map.get_revision(self.up_revision_id)
@property
def up_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
"""Get :attr:`~.MigrationInfo.up_revision_ids` as a
:class:`.Revision`."""
return self.revision_map.get_revisions(self.up_revision_ids)
@property
def down_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
"""Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of
:class:`Revisions <.Revision>`."""
return self.revision_map.get_revisions(self.down_revision_ids)
@property
def source_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
"""Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of
:class:`Revisions <.Revision>`."""
return self.revision_map.get_revisions(self.source_revision_ids)
@property
def destination_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
"""Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of
:class:`Revisions <.Revision>`."""
return self.revision_map.get_revisions(self.destination_revision_ids)
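# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# A typical ``on_version_apply`` hook; the ``step`` argument is the
# MigrationInfo instance defined above.  In env.py this would be wired up
# via ``context.configure(..., on_version_apply=(log_version_apply,))``.
def log_version_apply(*, ctx, step, heads, run_args) -> None:
    action = "upgrade" if step.is_upgrade else "downgrade"
    log.info(
        "%s %s -> %s (heads now: %s)",
        action,
        step.source_revision_ids,
        step.destination_revision_ids,
        sorted(heads),
    )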
class MigrationStep:
from_revisions_no_deps: Tuple[str, ...]
to_revisions_no_deps: Tuple[str, ...]
is_upgrade: bool
migration_fn: Any
if TYPE_CHECKING:
@property
def doc(self) -> Optional[str]: ...
@property
def name(self) -> str:
return self.migration_fn.__name__
@classmethod
def upgrade_from_script(
cls, revision_map: RevisionMap, script: Script
) -> RevisionStep:
return RevisionStep(revision_map, script, True)
@classmethod
def downgrade_from_script(
cls, revision_map: RevisionMap, script: Script
) -> RevisionStep:
return RevisionStep(revision_map, script, False)
@property
def is_downgrade(self) -> bool:
return not self.is_upgrade
@property
def short_log(self) -> str:
return "%s %s -> %s" % (
self.name,
util.format_as_comma(self.from_revisions_no_deps),
util.format_as_comma(self.to_revisions_no_deps),
)
def __str__(self):
if self.doc:
return "%s %s -> %s, %s" % (
self.name,
util.format_as_comma(self.from_revisions_no_deps),
util.format_as_comma(self.to_revisions_no_deps),
self.doc,
)
else:
return self.short_log
class RevisionStep(MigrationStep):
def __init__(
self, revision_map: RevisionMap, revision: Script, is_upgrade: bool
) -> None:
self.revision_map = revision_map
self.revision = revision
self.is_upgrade = is_upgrade
if is_upgrade:
self.migration_fn = revision.module.upgrade
else:
self.migration_fn = revision.module.downgrade
def __repr__(self):
return "RevisionStep(%r, is_upgrade=%r)" % (
self.revision.revision,
self.is_upgrade,
)
def __eq__(self, other: object) -> bool:
return (
isinstance(other, RevisionStep)
and other.revision == self.revision
and self.is_upgrade == other.is_upgrade
)
@property
def doc(self) -> Optional[str]:
return self.revision.doc
@property
def from_revisions(self) -> Tuple[str, ...]:
if self.is_upgrade:
return self.revision._normalized_down_revisions
else:
return (self.revision.revision,)
@property
def from_revisions_no_deps( # type:ignore[override]
self,
) -> Tuple[str, ...]:
if self.is_upgrade:
return self.revision._versioned_down_revisions
else:
return (self.revision.revision,)
@property
def to_revisions(self) -> Tuple[str, ...]:
if self.is_upgrade:
return (self.revision.revision,)
else:
return self.revision._normalized_down_revisions
@property
def to_revisions_no_deps( # type:ignore[override]
self,
) -> Tuple[str, ...]:
if self.is_upgrade:
return (self.revision.revision,)
else:
return self.revision._versioned_down_revisions
@property
def _has_scalar_down_revision(self) -> bool:
return len(self.revision._normalized_down_revisions) == 1
def should_delete_branch(self, heads: Set[str]) -> bool:
"""A delete is when we are a. in a downgrade and b.
we are going to the "base" or we are going to a version that
is implied as a dependency on another version that is remaining.
"""
if not self.is_downgrade:
return False
if self.revision.revision not in heads:
return False
downrevs = self.revision._normalized_down_revisions
if not downrevs:
# is a base
return True
else:
# determine what the ultimate "to_revisions" for an
# unmerge would be. If there are none, then we're a delete.
to_revisions = self._unmerge_to_revisions(heads)
return not to_revisions
def merge_branch_idents(
self, heads: Set[str]
) -> Tuple[List[str], str, str]:
other_heads = set(heads).difference(self.from_revisions)
if other_heads:
ancestors = {
r.revision
for r in self.revision_map._get_ancestor_nodes(
self.revision_map.get_revisions(other_heads), check=False
)
}
from_revisions = list(
set(self.from_revisions).difference(ancestors)
)
else:
from_revisions = list(self.from_revisions)
return (
# delete revs, update from rev, update to rev
list(from_revisions[0:-1]),
from_revisions[-1],
self.to_revisions[0],
)
def _unmerge_to_revisions(self, heads: Set[str]) -> Tuple[str, ...]:
other_heads = set(heads).difference([self.revision.revision])
if other_heads:
ancestors = {
r.revision
for r in self.revision_map._get_ancestor_nodes(
self.revision_map.get_revisions(other_heads), check=False
)
}
return tuple(set(self.to_revisions).difference(ancestors))
else:
# for each revision we plan to return, compute its ancestors
# (excluding self), and remove those from the final output since
# they are already accounted for.
ancestors = {
r.revision
for to_revision in self.to_revisions
for r in self.revision_map._get_ancestor_nodes(
self.revision_map.get_revisions(to_revision), check=False
)
if r.revision != to_revision
}
return tuple(set(self.to_revisions).difference(ancestors))
def unmerge_branch_idents(
self, heads: Set[str]
) -> Tuple[str, str, Tuple[str, ...]]:
to_revisions = self._unmerge_to_revisions(heads)
return (
# update from rev, update to rev, insert revs
self.from_revisions[0],
to_revisions[-1],
to_revisions[0:-1],
)
def should_create_branch(self, heads: Set[str]) -> bool:
if not self.is_upgrade:
return False
downrevs = self.revision._normalized_down_revisions
if not downrevs:
# is a base
return True
else:
# none of our downrevs are present, so...
# we have to insert our version. This is true whether there is
# a single downrev or multiple (in the latter case, we're a
# merge point.)
if not heads.intersection(downrevs):
return True
else:
return False
def should_merge_branches(self, heads: Set[str]) -> bool:
if not self.is_upgrade:
return False
downrevs = self.revision._normalized_down_revisions
if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1:
return True
return False
def should_unmerge_branches(self, heads: Set[str]) -> bool:
if not self.is_downgrade:
return False
downrevs = self.revision._normalized_down_revisions
if self.revision.revision in heads and len(downrevs) > 1:
return True
return False
def update_version_num(self, heads: Set[str]) -> Tuple[str, str]:
if not self._has_scalar_down_revision:
downrev = heads.intersection(
self.revision._normalized_down_revisions
)
assert (
len(downrev) == 1
), "Can't do an UPDATE because downrevision is ambiguous"
down_revision = list(downrev)[0]
else:
down_revision = self.revision._normalized_down_revisions[0]
if self.is_upgrade:
return down_revision, self.revision.revision
else:
return self.revision.revision, down_revision
@property
def delete_version_num(self) -> str:
return self.revision.revision
@property
def insert_version_num(self) -> str:
return self.revision.revision
@property
def info(self) -> MigrationInfo:
return MigrationInfo(
revision_map=self.revision_map,
up_revisions=self.revision.revision,
down_revisions=self.revision._normalized_down_revisions,
is_upgrade=self.is_upgrade,
is_stamp=False,
)
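# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# The set arithmetic behind merge_branch_idents() above, in isolation:
# merging two heads ('a1', 'b1') into a merge revision 'm1' deletes all
# but one of the matched head rows and updates the remaining row to the
# merge point.
def _demo_merge_idents() -> None:
    heads = {"a1", "b1"}
    from_revisions = sorted(heads)  # the merge rev's down-revisions
    to_revision = "m1"
    delete_revs, update_from = from_revisions[:-1], from_revisions[-1]
    # ['a1'] b1 -> m1 : one DELETE plus one UPDATE on alembic_version
    print(delete_revs, update_from, "->", to_revision)

# _demo_merge_idents()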
class StampStep(MigrationStep):
def __init__(
self,
from_: Optional[Union[str, Collection[str]]],
to_: Optional[Union[str, Collection[str]]],
is_upgrade: bool,
branch_move: bool,
revision_map: Optional[RevisionMap] = None,
) -> None:
self.from_: Tuple[str, ...] = util.to_tuple(from_, default=())
self.to_: Tuple[str, ...] = util.to_tuple(to_, default=())
self.is_upgrade = is_upgrade
self.branch_move = branch_move
self.migration_fn = self.stamp_revision
self.revision_map = revision_map
doc: Optional[str] = None
def stamp_revision(self, **kw: Any) -> None:
return None
def __eq__(self, other):
return (
isinstance(other, StampStep)
and other.from_revisions == self.from_revisions
and other.to_revisions == self.to_revisions
and other.branch_move == self.branch_move
and self.is_upgrade == other.is_upgrade
)
@property
def from_revisions(self):
return self.from_
@property
def to_revisions(self) -> Tuple[str, ...]:
return self.to_
@property
def from_revisions_no_deps( # type:ignore[override]
self,
) -> Tuple[str, ...]:
return self.from_
@property
def to_revisions_no_deps( # type:ignore[override]
self,
) -> Tuple[str, ...]:
return self.to_
@property
def delete_version_num(self) -> str:
assert len(self.from_) == 1
return self.from_[0]
@property
def insert_version_num(self) -> str:
assert len(self.to_) == 1
return self.to_[0]
def update_version_num(self, heads: Set[str]) -> Tuple[str, str]:
assert len(self.from_) == 1
assert len(self.to_) == 1
return self.from_[0], self.to_[0]
def merge_branch_idents(
self, heads: Union[Set[str], List[str]]
) -> Union[Tuple[List[Any], str, str], Tuple[List[str], str, str]]:
return (
# delete revs, update from rev, update to rev
list(self.from_[0:-1]),
self.from_[-1],
self.to_[0],
)
def unmerge_branch_idents(
self, heads: Set[str]
) -> Tuple[str, str, List[str]]:
return (
# update from rev, update to rev, insert revs
self.from_[0],
self.to_[-1],
list(self.to_[0:-1]),
)
def should_delete_branch(self, heads: Set[str]) -> bool:
# TODO: we probably need to look for self.to_ inside of heads,
# in a similar manner as should_create_branch, however we have
# no tests for this yet (stamp downgrades w/ branches)
return self.is_downgrade and self.branch_move
def should_create_branch(self, heads: Set[str]) -> Union[Set[str], bool]:
return (
self.is_upgrade
and (self.branch_move or set(self.from_).difference(heads))
and set(self.to_).difference(heads)
)
def should_merge_branches(self, heads: Set[str]) -> bool:
return len(self.from_) > 1
def should_unmerge_branches(self, heads: Set[str]) -> bool:
return len(self.to_) > 1
@property
def info(self) -> MigrationInfo:
up, down = (
(self.to_, self.from_)
if self.is_upgrade
else (self.from_, self.to_)
)
assert self.revision_map is not None
return MigrationInfo(
revision_map=self.revision_map,
up_revisions=up,
down_revisions=down,
is_upgrade=self.is_upgrade,
is_stamp=True,
)
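# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# Roughly what ``alembic stamp head`` performs, tying
# MigrationContext.stamp() to a ScriptDirectory.  Assumes ``config`` is a
# populated alembic Config and ``connection`` an open SQLAlchemy
# connection; the import is deferred to avoid a circular module import.
def _demo_stamp(config, connection) -> None:
    from alembic.script import ScriptDirectory

    script = ScriptDirectory.from_config(config)
    context = MigrationContext.configure(connection)
    context.stamp(script, "head")  # moves alembic_version to the head rev

# _demo_stamp(my_config, my_connection)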
|
from __future__ import annotations
from contextlib import contextmanager
import datetime
import os
import re
import shutil
import sys
from types import ModuleType
from typing import Any
from typing import cast
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from . import revision
from . import write_hooks
from .. import util
from ..runtime import migration
from ..util import compat
from ..util import not_none
if TYPE_CHECKING:
from .revision import _GetRevArg
from .revision import _RevIdType
from .revision import Revision
from ..config import Config
from ..config import MessagingOptions
from ..runtime.migration import RevisionStep
from ..runtime.migration import StampStep
try:
if compat.py39:
from zoneinfo import ZoneInfo
from zoneinfo import ZoneInfoNotFoundError
else:
from backports.zoneinfo import ZoneInfo # type: ignore[import-not-found,no-redef] # noqa: E501
from backports.zoneinfo import ZoneInfoNotFoundError # type: ignore[no-redef] # noqa: E501
except ImportError:
ZoneInfo = None # type: ignore[assignment, misc]
_sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$")
_only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$")
_legacy_rev = re.compile(r"([a-f0-9]+)\.py$")
_slug_re = re.compile(r"\w+")
_default_file_template = "%(rev)s_%(slug)s"
_split_on_space_comma = re.compile(r", *|(?: +)")
_split_on_space_comma_colon = re.compile(r", *|(?: +)|\:")
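# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# How _slug_re turns a revision message into the filename "slug" used by
# ScriptDirectory._rev_path() below: word characters are extracted, joined
# with underscores, and lowercased.
def _demo_slug() -> None:
    message = "Add user table"
    slug = "_".join(_slug_re.findall(message)).lower()
    print(slug)  # add_user_table

# _demo_slug()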
class ScriptDirectory:
"""Provides operations upon an Alembic script directory.
This object is useful to get information as to current revisions,
most notably being able to get at the "head" revision, for schemes
that want to test if the current revision in the database is the most
recent::
from alembic.script import ScriptDirectory
from alembic.config import Config
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
head_revision = script.get_current_head()
"""
def __init__(
self,
dir: str, # noqa
file_template: str = _default_file_template,
truncate_slug_length: Optional[int] = 40,
version_locations: Optional[List[str]] = None,
sourceless: bool = False,
output_encoding: str = "utf-8",
timezone: Optional[str] = None,
hook_config: Optional[Mapping[str, str]] = None,
recursive_version_locations: bool = False,
messaging_opts: MessagingOptions = cast(
"MessagingOptions", util.EMPTY_DICT
),
) -> None:
self.dir = dir
self.file_template = file_template
self.version_locations = version_locations
self.truncate_slug_length = truncate_slug_length or 40
self.sourceless = sourceless
self.output_encoding = output_encoding
self.revision_map = revision.RevisionMap(self._load_revisions)
self.timezone = timezone
self.hook_config = hook_config
self.recursive_version_locations = recursive_version_locations
self.messaging_opts = messaging_opts
if not os.access(dir, os.F_OK):
raise util.CommandError(
"Path doesn't exist: %r. Please use "
"the 'init' command to create a new "
"scripts folder." % os.path.abspath(dir)
)
@property
def versions(self) -> str:
loc = self._version_locations
if len(loc) > 1:
raise util.CommandError("Multiple version_locations present")
else:
return loc[0]
@util.memoized_property
def _version_locations(self) -> Sequence[str]:
if self.version_locations:
return [
os.path.abspath(util.coerce_resource_to_filename(location))
for location in self.version_locations
]
else:
return (os.path.abspath(os.path.join(self.dir, "versions")),)
def _load_revisions(self) -> Iterator[Script]:
if self.version_locations:
paths = [
vers
for vers in self._version_locations
if os.path.exists(vers)
]
else:
paths = [self.versions]
dupes = set()
for vers in paths:
for file_path in Script._list_py_dir(self, vers):
real_path = os.path.realpath(file_path)
if real_path in dupes:
util.warn(
"File %s loaded twice! ignoring. Please ensure "
"version_locations is unique." % real_path
)
continue
dupes.add(real_path)
filename = os.path.basename(real_path)
dir_name = os.path.dirname(real_path)
script = Script._from_filename(self, dir_name, filename)
if script is None:
continue
yield script
@classmethod
def from_config(cls, config: Config) -> ScriptDirectory:
"""Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
instance.
The :class:`.Config` need only have the ``script_location`` key
present.
"""
script_location = config.get_main_option("script_location")
if script_location is None:
raise util.CommandError(
"No 'script_location' key " "found in configuration."
)
truncate_slug_length: Optional[int]
tsl = config.get_main_option("truncate_slug_length")
if tsl is not None:
truncate_slug_length = int(tsl)
else:
truncate_slug_length = None
version_locations_str = config.get_main_option("version_locations")
version_locations: Optional[List[str]]
if version_locations_str:
version_path_separator = config.get_main_option(
"version_path_separator"
)
split_on_path = {
None: None,
"space": " ",
"newline": "\n",
"os": os.pathsep,
":": ":",
";": ";",
}
try:
split_char: Optional[str] = split_on_path[
version_path_separator
]
except KeyError as ke:
raise ValueError(
"'%s' is not a valid value for "
"version_path_separator; "
"expected 'space', 'newline', 'os', ':', ';'"
% version_path_separator
) from ke
else:
if split_char is None:
# legacy behaviour for backwards compatibility
version_locations = _split_on_space_comma.split(
version_locations_str
)
else:
version_locations = [
x.strip()
for x in version_locations_str.split(split_char)
if x
]
else:
version_locations = None
prepend_sys_path = config.get_main_option("prepend_sys_path")
if prepend_sys_path:
sys.path[:0] = list(
_split_on_space_comma_colon.split(prepend_sys_path)
)
rvl = config.get_main_option("recursive_version_locations") == "true"
return ScriptDirectory(
util.coerce_resource_to_filename(script_location),
file_template=config.get_main_option(
"file_template", _default_file_template
),
truncate_slug_length=truncate_slug_length,
sourceless=config.get_main_option("sourceless") == "true",
output_encoding=config.get_main_option("output_encoding", "utf-8"),
version_locations=version_locations,
timezone=config.get_main_option("timezone"),
hook_config=config.get_section("post_write_hooks", {}),
recursive_version_locations=rvl,
messaging_opts=config.messaging_opts,
)
@contextmanager
def _catch_revision_errors(
self,
ancestor: Optional[str] = None,
multiple_heads: Optional[str] = None,
start: Optional[str] = None,
end: Optional[str] = None,
resolution: Optional[str] = None,
) -> Iterator[None]:
try:
yield
except revision.RangeNotAncestorError as rna:
if start is None:
start = cast(Any, rna.lower)
if end is None:
end = cast(Any, rna.upper)
if not ancestor:
ancestor = (
"Requested range %(start)s:%(end)s does not refer to "
"ancestor/descendant revisions along the same branch"
)
ancestor = ancestor % {"start": start, "end": end}
raise util.CommandError(ancestor) from rna
except revision.MultipleHeads as mh:
if not multiple_heads:
multiple_heads = (
"Multiple head revisions are present for given "
"argument '%(head_arg)s'; please "
"specify a specific target revision, "
"'<branchname>@%(head_arg)s' to "
"narrow to a specific head, or 'heads' for all heads"
)
multiple_heads = multiple_heads % {
"head_arg": end or mh.argument,
"heads": util.format_as_comma(mh.heads),
}
raise util.CommandError(multiple_heads) from mh
except revision.ResolutionError as re:
if resolution is None:
resolution = "Can't locate revision identified by '%s'" % (
re.argument
)
raise util.CommandError(resolution) from re
except revision.RevisionError as err:
raise util.CommandError(err.args[0]) from err
def walk_revisions(
self, base: str = "base", head: str = "heads"
) -> Iterator[Script]:
"""Iterate through all revisions.
:param base: the base revision, or "base" to start from the
empty revision.
:param head: the head revision; defaults to "heads" to indicate
all head revisions. May also be "head" to indicate a single
head revision.
"""
with self._catch_revision_errors(start=base, end=head):
for rev in self.revision_map.iterate_revisions(
head, base, inclusive=True, assert_relative_length=False
):
yield cast(Script, rev)
def get_revisions(self, id_: _GetRevArg) -> Tuple[Script, ...]:
"""Return the :class:`.Script` instance with the given rev identifier,
symbolic name, or sequence of identifiers.
"""
with self._catch_revision_errors():
return cast(
Tuple[Script, ...],
self.revision_map.get_revisions(id_),
)
def get_all_current(self, id_: Tuple[str, ...]) -> Set[Script]:
with self._catch_revision_errors():
return cast(Set[Script], self.revision_map._get_all_current(id_))
def get_revision(self, id_: str) -> Script:
"""Return the :class:`.Script` instance with the given rev id.
.. seealso::
:meth:`.ScriptDirectory.get_revisions`
"""
with self._catch_revision_errors():
return cast(Script, self.revision_map.get_revision(id_))
def as_revision_number(
self, id_: Optional[str]
) -> Optional[Union[str, Tuple[str, ...]]]:
"""Convert a symbolic revision, i.e. 'head' or 'base', into
an actual revision number."""
with self._catch_revision_errors():
rev, branch_name = self.revision_map._resolve_revision_number(id_)
if not rev:
# convert () to None
return None
elif id_ == "heads":
return rev
else:
return rev[0]
def iterate_revisions(
self,
upper: Union[str, Tuple[str, ...], None],
lower: Union[str, Tuple[str, ...], None],
**kw: Any,
) -> Iterator[Script]:
"""Iterate through script revisions, starting at the given
upper revision identifier and ending at the lower.
The traversal strictly uses the `down_revision`
marker inside each migration script, so it is a requirement
that upper >= lower; otherwise nothing is returned.
The iterator yields :class:`.Script` objects.
.. seealso::
:meth:`.RevisionMap.iterate_revisions`
"""
return cast(
Iterator[Script],
self.revision_map.iterate_revisions(upper, lower, **kw),
)
def get_current_head(self) -> Optional[str]:
"""Return the current head revision.
If the script directory has multiple heads
due to branching, an error is raised;
:meth:`.ScriptDirectory.get_heads` should be
preferred.
:return: a string revision number.
.. seealso::
:meth:`.ScriptDirectory.get_heads`
"""
with self._catch_revision_errors(
multiple_heads=(
"The script directory has multiple heads (due to branching)."
"Please use get_heads(), or merge the branches using "
"alembic merge."
)
):
return self.revision_map.get_current_head()
def get_heads(self) -> List[str]:
"""Return all "versioned head" revisions as strings.
This is normally a list of length one,
unless branches are present. The
:meth:`.ScriptDirectory.get_current_head()` method
can be used normally when a script directory
has only one head.
:return: a list of string revision numbers.
"""
return list(self.revision_map.heads)
def get_base(self) -> Optional[str]:
"""Return the "base" revision as a string.
This is the revision number of the script that
has a ``down_revision`` of None.
If the script directory has multiple bases, an error is raised;
:meth:`.ScriptDirectory.get_bases` should be
preferred.
"""
bases = self.get_bases()
if len(bases) > 1:
raise util.CommandError(
"The script directory has multiple bases. "
"Please use get_bases()."
)
elif bases:
return bases[0]
else:
return None
def get_bases(self) -> List[str]:
"""return all "base" revisions as strings.
This is the revision number of all scripts that
have a ``down_revision`` of None.
"""
return list(self.revision_map.bases)
def _upgrade_revs(
self, destination: str, current_rev: str
) -> List[RevisionStep]:
with self._catch_revision_errors(
ancestor="Destination %(end)s is not a valid upgrade "
"target from current head(s)",
end=destination,
):
revs = self.iterate_revisions(
destination, current_rev, implicit_base=True
)
return [
migration.MigrationStep.upgrade_from_script(
self.revision_map, script
)
for script in reversed(list(revs))
]
def _downgrade_revs(
self, destination: str, current_rev: Optional[str]
) -> List[RevisionStep]:
with self._catch_revision_errors(
ancestor="Destination %(end)s is not a valid downgrade "
"target from current head(s)",
end=destination,
):
revs = self.iterate_revisions(
current_rev, destination, select_for_downgrade=True
)
return [
migration.MigrationStep.downgrade_from_script(
self.revision_map, script
)
for script in revs
]
def _stamp_revs(
self, revision: _RevIdType, heads: _RevIdType
) -> List[StampStep]:
with self._catch_revision_errors(
multiple_heads="Multiple heads are present; please specify a "
"single target revision"
):
heads_revs = self.get_revisions(heads)
steps = []
if not revision:
revision = "base"
filtered_heads: List[Script] = []
for rev in util.to_tuple(revision):
if rev:
filtered_heads.extend(
self.revision_map.filter_for_lineage(
cast(Sequence[Script], heads_revs),
rev,
include_dependencies=True,
)
)
filtered_heads = util.unique_list(filtered_heads)
dests = self.get_revisions(revision) or [None]
for dest in dests:
if dest is None:
# dest is 'base'. Return a "delete branch" migration
# for all applicable heads.
steps.extend(
[
migration.StampStep(
head.revision,
None,
False,
True,
self.revision_map,
)
for head in filtered_heads
]
)
continue
elif dest in filtered_heads:
# the dest is already in the version table, do nothing.
continue
# figure out if the dest is a descendant or an
# ancestor of the selected nodes
descendants = set(
self.revision_map._get_descendant_nodes([dest])
)
ancestors = set(self.revision_map._get_ancestor_nodes([dest]))
if descendants.intersection(filtered_heads):
# heads are above the target, so this is a downgrade.
# we can treat them as a "merge", single step.
assert not ancestors.intersection(filtered_heads)
todo_heads = [head.revision for head in filtered_heads]
step = migration.StampStep(
todo_heads,
dest.revision,
False,
False,
self.revision_map,
)
steps.append(step)
continue
elif ancestors.intersection(filtered_heads):
# heads are below the target, so this is an upgrade.
# we can treat them as a "merge", single step.
todo_heads = [head.revision for head in filtered_heads]
step = migration.StampStep(
todo_heads,
dest.revision,
True,
False,
self.revision_map,
)
steps.append(step)
continue
else:
# destination is in a branch not represented,
# treat it as new branch
step = migration.StampStep(
(), dest.revision, True, True, self.revision_map
)
steps.append(step)
continue
return steps
def run_env(self) -> None:
"""Run the script environment.
This basically runs the ``env.py`` script present
in the migration environment. It is called exclusively
by the command functions in :mod:`alembic.command`.
"""
util.load_python_file(self.dir, "env.py")
@property
def env_py_location(self) -> str:
return os.path.abspath(os.path.join(self.dir, "env.py"))
def _generate_template(self, src: str, dest: str, **kw: Any) -> None:
with util.status(
f"Generating {os.path.abspath(dest)}", **self.messaging_opts
):
util.template_to_file(src, dest, self.output_encoding, **kw)
def _copy_file(self, src: str, dest: str) -> None:
with util.status(
f"Generating {os.path.abspath(dest)}", **self.messaging_opts
):
shutil.copy(src, dest)
def _ensure_directory(self, path: str) -> None:
path = os.path.abspath(path)
if not os.path.exists(path):
with util.status(
f"Creating directory {path}", **self.messaging_opts
):
os.makedirs(path)
def _generate_create_date(self) -> datetime.datetime:
if self.timezone is not None:
if ZoneInfo is None:
raise util.CommandError(
"Python >= 3.9 is required for timezone support or "
"the 'backports.zoneinfo' package must be installed."
)
# First, assume correct capitalization
try:
tzinfo = ZoneInfo(self.timezone)
except ZoneInfoNotFoundError:
tzinfo = None
if tzinfo is None:
try:
tzinfo = ZoneInfo(self.timezone.upper())
except ZoneInfoNotFoundError:
raise util.CommandError(
"Can't locate timezone: %s" % self.timezone
) from None
create_date = (
datetime.datetime.utcnow()
.replace(tzinfo=datetime.timezone.utc)
.astimezone(tzinfo)
)
else:
create_date = datetime.datetime.now()
return create_date
def generate_revision(
self,
revid: str,
message: Optional[str],
head: Optional[_RevIdType] = None,
splice: Optional[bool] = False,
branch_labels: Optional[_RevIdType] = None,
version_path: Optional[str] = None,
depends_on: Optional[_RevIdType] = None,
**kw: Any,
) -> Optional[Script]:
"""Generate a new revision file.
This runs the ``script.py.mako`` template, given
template arguments, and creates a new file.
:param revid: String revision id. Typically this
comes from ``alembic.util.rev_id()``.
:param message: the revision message, the one passed
by the -m argument to the ``revision`` command.
:param head: the head revision to generate against. Defaults
to the current "head" if no branches are present, else raises
an exception.
:param splice: if True, allow the "head" version to not be an
actual head; otherwise, the selected head must be a head
(e.g. endpoint) revision.
"""
if head is None:
head = "head"
try:
Script.verify_rev_id(revid)
except revision.RevisionError as err:
raise util.CommandError(err.args[0]) from err
with self._catch_revision_errors(
multiple_heads=(
"Multiple heads are present; please specify the head "
"revision on which the new revision should be based, "
"or perform a merge."
)
):
heads = cast(
Tuple[Optional["Revision"], ...],
self.revision_map.get_revisions(head),
)
for h in heads:
assert h != "base" # type: ignore[comparison-overlap]
if len(set(heads)) != len(heads):
raise util.CommandError("Duplicate head revisions specified")
create_date = self._generate_create_date()
if version_path is None:
if len(self._version_locations) > 1:
for head_ in heads:
if head_ is not None:
assert isinstance(head_, Script)
version_path = os.path.dirname(head_.path)
break
else:
raise util.CommandError(
"Multiple version locations present, "
"please specify --version-path"
)
else:
version_path = self.versions
norm_path = os.path.normpath(os.path.abspath(version_path))
for vers_path in self._version_locations:
if os.path.normpath(vers_path) == norm_path:
break
else:
raise util.CommandError(
"Path %s is not represented in current "
"version locations" % version_path
)
if self.version_locations:
self._ensure_directory(version_path)
path = self._rev_path(version_path, revid, message, create_date)
if not splice:
for head_ in heads:
if head_ is not None and not head_.is_head:
raise util.CommandError(
"Revision %s is not a head revision; please specify "
"--splice to create a new branch from this revision"
% head_.revision
)
resolved_depends_on: Optional[List[str]]
if depends_on:
with self._catch_revision_errors():
resolved_depends_on = [
(
dep
if dep in rev.branch_labels # maintain branch labels
else rev.revision
) # resolve partial revision identifiers
for rev, dep in [
(not_none(self.revision_map.get_revision(dep)), dep)
for dep in util.to_list(depends_on)
]
]
else:
resolved_depends_on = None
self._generate_template(
os.path.join(self.dir, "script.py.mako"),
path,
up_revision=str(revid),
down_revision=revision.tuple_rev_as_scalar(
tuple(h.revision if h is not None else None for h in heads)
),
branch_labels=util.to_tuple(branch_labels),
depends_on=revision.tuple_rev_as_scalar(resolved_depends_on),
create_date=create_date,
comma=util.format_as_comma,
message=message if message is not None else ("empty message"),
**kw,
)
post_write_hooks = self.hook_config
if post_write_hooks:
write_hooks._run_hooks(path, post_write_hooks)
try:
script = Script._from_path(self, path)
except revision.RevisionError as err:
raise util.CommandError(err.args[0]) from err
if script is None:
return None
if branch_labels and not script.branch_labels:
raise util.CommandError(
"Version %s specified branch_labels %s, however the "
"migration file %s does not have them; have you upgraded "
"your script.py.mako to include the "
"'branch_labels' section?"
% (script.revision, branch_labels, script.path)
)
self.revision_map.add_revision(script)
return script
def _rev_path(
self,
path: str,
rev_id: str,
message: Optional[str],
create_date: datetime.datetime,
) -> str:
epoch = int(create_date.timestamp())
slug = "_".join(_slug_re.findall(message or "")).lower()
if len(slug) > self.truncate_slug_length:
slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_"
filename = "%s.py" % (
self.file_template
% {
"rev": rev_id,
"slug": slug,
"epoch": epoch,
"year": create_date.year,
"month": create_date.month,
"day": create_date.day,
"hour": create_date.hour,
"minute": create_date.minute,
"second": create_date.second,
}
)
return os.path.join(path, filename)
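# --- Illustrative sketch (editor's addition; not part of Alembic) ---
# The tokens _rev_path() above makes available to ``file_template``; any
# subset may be used.  A date-prefixed template, as often configured in
# alembic.ini, is shown here with hypothetical values.
def _demo_file_template() -> None:
    template = "%(year)d_%(month).2d_%(day).2d_%(rev)s_%(slug)s"
    filename = "%s.py" % (
        template
        % {
            "rev": "ae1027a6acf",
            "slug": "add_user_table",
            "year": 2024,
            "month": 3,
            "day": 7,
        }
    )
    print(filename)  # 2024_03_07_ae1027a6acf_add_user_table.py

# _demo_file_template()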
class Script(revision.Revision):
"""Represent a single revision file in a ``versions/`` directory.
The :class:`.Script` instance is returned by methods
such as :meth:`.ScriptDirectory.iterate_revisions`.
"""
def __init__(self, module: ModuleType, rev_id: str, path: str):
self.module = module
self.path = path
super().__init__(
rev_id,
module.down_revision,
branch_labels=util.to_tuple(
getattr(module, "branch_labels", None), default=()
),
dependencies=util.to_tuple(
getattr(module, "depends_on", None), default=()
),
)
module: ModuleType
"""The Python module representing the actual script itself."""
path: str
"""Filesystem path of the script."""
_db_current_indicator: Optional[bool] = None
"""Utility variable which when set will cause string output to indicate
this is a "current" version in some database"""
@property
def doc(self) -> str:
"""Return the docstring given in the script."""
return re.split("\n\n", self.longdoc)[0]
@property
def longdoc(self) -> str:
"""Return the docstring given in the script."""
doc = self.module.__doc__
if doc:
if hasattr(self.module, "_alembic_source_encoding"):
doc = doc.decode( # type: ignore[attr-defined]
self.module._alembic_source_encoding
)
return doc.strip() # type: ignore[union-attr]
else:
return ""
@property
def log_entry(self) -> str:
entry = "Rev: %s%s%s%s%s\n" % (
self.revision,
" (head)" if self.is_head else "",
" (branchpoint)" if self.is_branch_point else "",
" (mergepoint)" if self.is_merge_point else "",
" (current)" if self._db_current_indicator else "",
)
if self.is_merge_point:
entry += "Merges: %s\n" % (self._format_down_revision(),)
else:
entry += "Parent: %s\n" % (self._format_down_revision(),)
if self.dependencies:
entry += "Also depends on: %s\n" % (
util.format_as_comma(self.dependencies)
)
if self.is_branch_point:
entry += "Branches into: %s\n" % (
util.format_as_comma(self.nextrev)
)
if self.branch_labels:
entry += "Branch names: %s\n" % (
util.format_as_comma(self.branch_labels),
)
entry += "Path: %s\n" % (self.path,)
entry += "\n%s\n" % (
"\n".join(" %s" % para for para in self.longdoc.splitlines())
)
return entry
def __str__(self) -> str:
return "%s -> %s%s%s%s, %s" % (
self._format_down_revision(),
self.revision,
" (head)" if self.is_head else "",
" (branchpoint)" if self.is_branch_point else "",
" (mergepoint)" if self.is_merge_point else "",
self.doc,
)
def _head_only(
self,
include_branches: bool = False,
include_doc: bool = False,
include_parents: bool = False,
tree_indicators: bool = True,
head_indicators: bool = True,
) -> str:
text = self.revision
if include_parents:
if self.dependencies:
text = "%s (%s) -> %s" % (
self._format_down_revision(),
util.format_as_comma(self.dependencies),
text,
)
else:
text = "%s -> %s" % (self._format_down_revision(), text)
assert text is not None
if include_branches and self.branch_labels:
text += " (%s)" % util.format_as_comma(self.branch_labels)
if head_indicators or tree_indicators:
text += "%s%s%s" % (
" (head)" if self._is_real_head else "",
(
" (effective head)"
if self.is_head and not self._is_real_head
else ""
),
" (current)" if self._db_current_indicator else "",
)
if tree_indicators:
text += "%s%s" % (
" (branchpoint)" if self.is_branch_point else "",
" (mergepoint)" if self.is_merge_point else "",
)
if include_doc:
text += ", %s" % self.doc
return text
def cmd_format(
self,
verbose: bool,
include_branches: bool = False,
include_doc: bool = False,
include_parents: bool = False,
tree_indicators: bool = True,
) -> str:
if verbose:
return self.log_entry
else:
return self._head_only(
include_branches, include_doc, include_parents, tree_indicators
)
def _format_down_revision(self) -> str:
if not self.down_revision:
return "<base>"
else:
return util.format_as_comma(self._versioned_down_revisions)
@classmethod
def _from_path(
cls, scriptdir: ScriptDirectory, path: str
) -> Optional[Script]:
dir_, filename = os.path.split(path)
return cls._from_filename(scriptdir, dir_, filename)
@classmethod
def _list_py_dir(cls, scriptdir: ScriptDirectory, path: str) -> List[str]:
paths = []
for root, dirs, files in os.walk(path, topdown=True):
if root.endswith("__pycache__"):
# a special case - we may include these files
# if a `sourceless` option is specified
continue
for filename in sorted(files):
paths.append(os.path.join(root, filename))
if scriptdir.sourceless:
# look for __pycache__
py_cache_path = os.path.join(root, "__pycache__")
if os.path.exists(py_cache_path):
# add all files from __pycache__ whose filename is not
# already in the names we got from the version directory.
# add as relative paths including __pycache__ token
names = {filename.split(".")[0] for filename in files}
paths.extend(
os.path.join(py_cache_path, pyc)
for pyc in os.listdir(py_cache_path)
if pyc.split(".")[0] not in names
)
if not scriptdir.recursive_version_locations:
break
# the real script order is defined by revision, but it may be
# undefined if there are many files with the same
# `down_revision`; for a better user experience (e.g. when
# debugging), we use a deterministic order
dirs.sort()
return paths
@classmethod
def _from_filename(
cls, scriptdir: ScriptDirectory, dir_: str, filename: str
) -> Optional[Script]:
if scriptdir.sourceless:
py_match = _sourceless_rev_file.match(filename)
else:
py_match = _only_source_rev_file.match(filename)
if not py_match:
return None
py_filename = py_match.group(1)
if scriptdir.sourceless:
is_c = py_match.group(2) == "c"
is_o = py_match.group(2) == "o"
else:
is_c = is_o = False
if is_o or is_c:
py_exists = os.path.exists(os.path.join(dir_, py_filename))
pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c"))
# prefer .py over .pyc because we'd like to get the
# source encoding; prefer .pyc over .pyo because we'd like to
# have the docstrings which a -OO file would not have
if py_exists or is_o and pyc_exists:
return None
module = util.load_python_file(dir_, filename)
if not hasattr(module, "revision"):
# attempt to get the revision id from the script name,
# this for legacy only
m = _legacy_rev.match(filename)
if not m:
raise util.CommandError(
"Could not determine revision id from filename %s. "
"Be sure the 'revision' variable is "
"declared inside the script (please see 'Upgrading "
"from Alembic 0.1 to 0.2' in the documentation)."
% filename
)
else:
revision = m.group(1)
else:
revision = module.revision
return Script(module, revision, os.path.join(dir_, filename))
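# Illustrative sketch (not part of the library): the preference order that
# Script._from_filename above applies in "sourceless" directories.  The
# real regexes (_sourceless_rev_file, _only_source_rev_file) are defined
# elsewhere in this module; the pattern below is an assumption for
# illustration only.
import re as _re
from typing import Optional as _Optional

_compiled_rev_file = _re.compile(r"(.+\.py)(c|o)?$")  # hypothetical pattern

def _pick_script_sketch(dir_: str, filename: str) -> _Optional[str]:
    """Prefer .py over .pyc (keeps the source encoding) and .pyc over
    .pyo (keeps docstrings that a -OO compile would strip)."""
    match = _compiled_rev_file.match(filename)
    if match is None:
        return None
    py_filename, suffix = match.group(1), match.group(2)
    if suffix and os.path.exists(os.path.join(dir_, py_filename)):
        # the plain .py source exists; skip the compiled file
        return None
    if suffix == "o" and os.path.exists(
        os.path.join(dir_, py_filename + "c")
    ):
        # a .pyc exists; skip the .pyo
        return None
    return filename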
|
from __future__ import annotations
import collections
import re
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Deque
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import overload
from typing import Protocol
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy import util as sqlautil
from .. import util
from ..util import not_none
if TYPE_CHECKING:
from typing import Literal
_RevIdType = Union[str, List[str], Tuple[str, ...]]
_GetRevArg = Union[
str,
Iterable[Optional[str]],
Iterable[str],
]
_RevisionIdentifierType = Union[str, Tuple[str, ...], None]
_RevisionOrStr = Union["Revision", str]
_RevisionOrBase = Union["Revision", "Literal['base']"]
_InterimRevisionMapType = Dict[str, "Revision"]
_RevisionMapType = Dict[Union[None, str, Tuple[()]], Optional["Revision"]]
_T = TypeVar("_T")
_TR = TypeVar("_TR", bound=Optional[_RevisionOrStr])
_relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)")
_revision_illegal_chars = ["@", "-", "+"]
class _CollectRevisionsProtocol(Protocol):
def __call__(
self,
upper: _RevisionIdentifierType,
lower: _RevisionIdentifierType,
inclusive: bool,
implicit_base: bool,
assert_relative_length: bool,
) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]: ...
class RevisionError(Exception):
pass
class RangeNotAncestorError(RevisionError):
def __init__(
self, lower: _RevisionIdentifierType, upper: _RevisionIdentifierType
) -> None:
self.lower = lower
self.upper = upper
super().__init__(
"Revision %s is not an ancestor of revision %s"
% (lower or "base", upper or "base")
)
class MultipleHeads(RevisionError):
def __init__(self, heads: Sequence[str], argument: Optional[str]) -> None:
self.heads = heads
self.argument = argument
super().__init__(
"Multiple heads are present for given argument '%s'; "
"%s" % (argument, ", ".join(heads))
)
class ResolutionError(RevisionError):
def __init__(self, message: str, argument: str) -> None:
super().__init__(message)
self.argument = argument
class CycleDetected(RevisionError):
kind = "Cycle"
def __init__(self, revisions: Sequence[str]) -> None:
self.revisions = revisions
super().__init__(
"%s is detected in revisions (%s)"
% (self.kind, ", ".join(revisions))
)
class DependencyCycleDetected(CycleDetected):
kind = "Dependency cycle"
def __init__(self, revisions: Sequence[str]) -> None:
super().__init__(revisions)
class LoopDetected(CycleDetected):
kind = "Self-loop"
def __init__(self, revision: str) -> None:
super().__init__([revision])
class DependencyLoopDetected(DependencyCycleDetected, LoopDetected):
kind = "Dependency self-loop"
def __init__(self, revision: Sequence[str]) -> None:
super().__init__(revision)
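# Illustrative sketch (not part of the library): how the guards in
# Revision.__init__ (defined later in this module) raise the self-loop
# variants above.  The revision ids are made up.
def _demo_loop_errors() -> None:
    try:
        Revision("abc1", down_revision="abc1")
    except LoopDetected as err:
        print(err)  # Self-loop is detected in revisions (abc1)
    try:
        Revision("abc1", None, dependencies="abc1")
    except DependencyLoopDetected as err:
        print(err)  # Dependency self-loop is detected in revisions (abc1)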
class RevisionMap:
"""Maintains a map of :class:`.Revision` objects.
:class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain
and traverse the collection of :class:`.Script` objects, which are
themselves instances of :class:`.Revision`.
"""
def __init__(self, generator: Callable[[], Iterable[Revision]]) -> None:
"""Construct a new :class:`.RevisionMap`.
:param generator: a zero-arg callable that will generate an iterable
of :class:`.Revision` instances to be used. These are typically
:class:`.Script` subclasses within regular Alembic use.
"""
self._generator = generator
@util.memoized_property
def heads(self) -> Tuple[str, ...]:
"""All "head" revisions as strings.
This is normally a tuple of length one,
unless unmerged branches are present.
:return: a tuple of string revision numbers.
"""
self._revision_map
return self.heads
@util.memoized_property
def bases(self) -> Tuple[str, ...]:
"""All "base" revisions as strings.
These are revisions that have a ``down_revision`` of None,
or empty tuple.
:return: a tuple of string revision numbers.
"""
self._revision_map
return self.bases
@util.memoized_property
def _real_heads(self) -> Tuple[str, ...]:
"""All "real" head revisions as strings.
:return: a tuple of string revision numbers.
"""
self._revision_map
return self._real_heads
@util.memoized_property
def _real_bases(self) -> Tuple[str, ...]:
"""All "real" base revisions as strings.
:return: a tuple of string revision numbers.
"""
self._revision_map
return self._real_bases
@util.memoized_property
def _revision_map(self) -> _RevisionMapType:
"""memoized attribute, initializes the revision map from the
initial collection.
"""
# Ordering required for some tests to pass (but not required in
# general)
map_: _InterimRevisionMapType = sqlautil.OrderedDict()
heads: Set[Revision] = sqlautil.OrderedSet()
_real_heads: Set[Revision] = sqlautil.OrderedSet()
bases: Tuple[Revision, ...] = ()
_real_bases: Tuple[Revision, ...] = ()
has_branch_labels = set()
all_revisions = set()
for revision in self._generator():
all_revisions.add(revision)
if revision.revision in map_:
util.warn(
"Revision %s is present more than once" % revision.revision
)
map_[revision.revision] = revision
if revision.branch_labels:
has_branch_labels.add(revision)
heads.add(revision)
_real_heads.add(revision)
if revision.is_base:
bases += (revision,)
if revision._is_real_base:
_real_bases += (revision,)
# add the branch_labels to the map_. We'll need these
# to resolve the dependencies.
rev_map = map_.copy()
self._map_branch_labels(
has_branch_labels, cast(_RevisionMapType, map_)
)
# resolve dependency names from branch labels and symbolic
# names
self._add_depends_on(all_revisions, cast(_RevisionMapType, map_))
for rev in map_.values():
for downrev in rev._all_down_revisions:
if downrev not in map_:
util.warn(
"Revision %s referenced from %s is not present"
% (downrev, rev)
)
down_revision = map_[downrev]
down_revision.add_nextrev(rev)
if downrev in rev._versioned_down_revisions:
heads.discard(down_revision)
_real_heads.discard(down_revision)
# once the map has downrevisions populated, the dependencies
# can be further refined to include only those which are not
# already ancestors
self._normalize_depends_on(all_revisions, cast(_RevisionMapType, map_))
self._detect_cycles(rev_map, heads, bases, _real_heads, _real_bases)
revision_map: _RevisionMapType = dict(map_.items())
revision_map[None] = revision_map[()] = None
self.heads = tuple(rev.revision for rev in heads)
self._real_heads = tuple(rev.revision for rev in _real_heads)
self.bases = tuple(rev.revision for rev in bases)
self._real_bases = tuple(rev.revision for rev in _real_bases)
self._add_branches(has_branch_labels, revision_map)
return revision_map
def _detect_cycles(
self,
rev_map: _InterimRevisionMapType,
heads: Set[Revision],
bases: Tuple[Revision, ...],
_real_heads: Set[Revision],
_real_bases: Tuple[Revision, ...],
) -> None:
if not rev_map:
return
if not heads or not bases:
raise CycleDetected(list(rev_map))
total_space = {
rev.revision
for rev in self._iterate_related_revisions(
lambda r: r._versioned_down_revisions,
heads,
map_=cast(_RevisionMapType, rev_map),
)
}.intersection(
rev.revision
for rev in self._iterate_related_revisions(
lambda r: r.nextrev,
bases,
map_=cast(_RevisionMapType, rev_map),
)
)
deleted_revs = set(rev_map.keys()) - total_space
if deleted_revs:
raise CycleDetected(sorted(deleted_revs))
if not _real_heads or not _real_bases:
raise DependencyCycleDetected(list(rev_map))
total_space = {
rev.revision
for rev in self._iterate_related_revisions(
lambda r: r._all_down_revisions,
_real_heads,
map_=cast(_RevisionMapType, rev_map),
)
}.intersection(
rev.revision
for rev in self._iterate_related_revisions(
lambda r: r._all_nextrev,
_real_bases,
map_=cast(_RevisionMapType, rev_map),
)
)
deleted_revs = set(rev_map.keys()) - total_space
if deleted_revs:
raise DependencyCycleDetected(sorted(deleted_revs))
def _map_branch_labels(
self, revisions: Collection[Revision], map_: _RevisionMapType
) -> None:
for revision in revisions:
if revision.branch_labels:
assert revision._orig_branch_labels is not None
for branch_label in revision._orig_branch_labels:
if branch_label in map_:
map_rev = map_[branch_label]
assert map_rev is not None
raise RevisionError(
"Branch name '%s' in revision %s already "
"used by revision %s"
% (
branch_label,
revision.revision,
map_rev.revision,
)
)
map_[branch_label] = revision
def _add_branches(
self, revisions: Collection[Revision], map_: _RevisionMapType
) -> None:
for revision in revisions:
if revision.branch_labels:
revision.branch_labels.update(revision.branch_labels)
for node in self._get_descendant_nodes(
[revision], map_, include_dependencies=False
):
node.branch_labels.update(revision.branch_labels)
parent = node
while (
parent
and not parent._is_real_branch_point
and not parent.is_merge_point
):
parent.branch_labels.update(revision.branch_labels)
if parent.down_revision:
parent = map_[parent.down_revision]
else:
break
def _add_depends_on(
self, revisions: Collection[Revision], map_: _RevisionMapType
) -> None:
"""Resolve the 'dependencies' for each revision in a collection
in terms of actual revision ids, as opposed to branch labels or other
symbolic names.
The collection is then assigned to the _resolved_dependencies
attribute on each revision object.
"""
for revision in revisions:
if revision.dependencies:
deps = [
map_[dep] for dep in util.to_tuple(revision.dependencies)
]
revision._resolved_dependencies = tuple(
[d.revision for d in deps if d is not None]
)
else:
revision._resolved_dependencies = ()
def _normalize_depends_on(
self, revisions: Collection[Revision], map_: _RevisionMapType
) -> None:
"""Create a collection of "dependencies" that omits dependencies
that are already ancestor nodes for each revision in a given
collection.
This builds upon the _resolved_dependencies collection created in the
_add_depends_on() method, looking in the fully populated revision map
for ancestors, and omitting them from the _resolved_dependencies
collection as it is copied to a new collection. The new collection is
then assigned to the _normalized_resolved_dependencies attribute on
each revision object.
The collection is then used to determine the immediate "down revision"
identifiers for this revision.
"""
for revision in revisions:
if revision._resolved_dependencies:
normalized_resolved = set(revision._resolved_dependencies)
for rev in self._get_ancestor_nodes(
[revision],
include_dependencies=False,
map_=map_,
):
if rev is revision:
continue
elif rev._resolved_dependencies:
normalized_resolved.difference_update(
rev._resolved_dependencies
)
revision._normalized_resolved_dependencies = tuple(
normalized_resolved
)
else:
revision._normalized_resolved_dependencies = ()
def add_revision(self, revision: Revision, _replace: bool = False) -> None:
"""add a single revision to an existing map.
This method is for single-revision use cases; it's not
appropriate for fully populating an entire revision map.
"""
map_ = self._revision_map
if not _replace and revision.revision in map_:
util.warn(
"Revision %s is present more than once" % revision.revision
)
elif _replace and revision.revision not in map_:
raise Exception("revision %s not in map" % revision.revision)
map_[revision.revision] = revision
revisions = [revision]
self._add_branches(revisions, map_)
self._map_branch_labels(revisions, map_)
self._add_depends_on(revisions, map_)
if revision.is_base:
self.bases += (revision.revision,)
if revision._is_real_base:
self._real_bases += (revision.revision,)
for downrev in revision._all_down_revisions:
if downrev not in map_:
util.warn(
"Revision %s referenced from %s is not present"
% (downrev, revision)
)
not_none(map_[downrev]).add_nextrev(revision)
self._normalize_depends_on(revisions, map_)
if revision._is_real_head:
self._real_heads = tuple(
head
for head in self._real_heads
if head
not in set(revision._all_down_revisions).union(
[revision.revision]
)
) + (revision.revision,)
if revision.is_head:
self.heads = tuple(
head
for head in self.heads
if head
not in set(revision._versioned_down_revisions).union(
[revision.revision]
)
) + (revision.revision,)
def get_current_head(
self, branch_label: Optional[str] = None
) -> Optional[str]:
"""Return the current head revision.
If the script directory has multiple heads
due to branching, an error is raised;
:meth:`.ScriptDirectory.get_heads` should be
preferred.
:param branch_label: optional branch name which will limit the
heads considered to those which include that branch_label.
:return: a string revision number.
.. seealso::
:meth:`.ScriptDirectory.get_heads`
"""
current_heads: Sequence[str] = self.heads
if branch_label:
current_heads = self.filter_for_lineage(
current_heads, branch_label
)
if len(current_heads) > 1:
raise MultipleHeads(
current_heads,
"%s@head" % branch_label if branch_label else "head",
)
if current_heads:
return current_heads[0]
else:
return None
def _get_base_revisions(self, identifier: str) -> Tuple[str, ...]:
return self.filter_for_lineage(self.bases, identifier)
def get_revisions(
self, id_: Optional[_GetRevArg]
) -> Tuple[Optional[_RevisionOrBase], ...]:
"""Return the :class:`.Revision` instances with the given rev id
or identifiers.
May be given a single identifier, a sequence of identifiers, or the
special symbols "head" or "base". The result is a tuple of one
or more :class:`.Revision` objects, or an empty tuple in the case of
"base". In the case where 'head' or 'heads' is requested and the
revision map is empty, an empty tuple is returned.
Supports partial identifiers, where the given identifier
is matched against all identifiers that start with the given
characters; if there is exactly one match, that determines the
full revision.
"""
if isinstance(id_, (list, tuple, set, frozenset)):
return sum([self.get_revisions(id_elem) for id_elem in id_], ())
else:
resolved_id, branch_label = self._resolve_revision_number(id_)
if len(resolved_id) == 1:
try:
rint = int(resolved_id[0])
if rint < 0:
# branch@-n -> walk down from heads
select_heads = self.get_revisions("heads")
if branch_label is not None:
select_heads = tuple(
head
for head in select_heads
if branch_label
in is_revision(head).branch_labels
)
return tuple(
self._walk(head, steps=rint)
for head in select_heads
)
except ValueError:
# couldn't resolve as integer
pass
return tuple(
self._revision_for_ident(rev_id, branch_label)
for rev_id in resolved_id
)
def get_revision(self, id_: Optional[str]) -> Optional[Revision]:
"""Return the :class:`.Revision` instance with the given rev id.
If a symbolic name such as "head" or "base" is given, resolves
the identifier into the current head or base revision. If the symbolic
name refers to multiples, :class:`.MultipleHeads` is raised.
Supports partial identifiers, where the given identifier
is matched against all identifiers that start with the given
characters; if there is exactly one match, that determines the
full revision.
"""
resolved_id, branch_label = self._resolve_revision_number(id_)
if len(resolved_id) > 1:
raise MultipleHeads(resolved_id, id_)
resolved: Union[str, Tuple[()]] = resolved_id[0] if resolved_id else ()
return self._revision_for_ident(resolved, branch_label)
def _resolve_branch(self, branch_label: str) -> Optional[Revision]:
try:
branch_rev = self._revision_map[branch_label]
except KeyError:
try:
nonbranch_rev = self._revision_for_ident(branch_label)
except ResolutionError as re:
raise ResolutionError(
"No such branch: '%s'" % branch_label, branch_label
) from re
else:
return nonbranch_rev
else:
return branch_rev
def _revision_for_ident(
self,
resolved_id: Union[str, Tuple[()], None],
check_branch: Optional[str] = None,
) -> Optional[Revision]:
branch_rev: Optional[Revision]
if check_branch:
branch_rev = self._resolve_branch(check_branch)
else:
branch_rev = None
revision: Union[Optional[Revision], Literal[False]]
try:
revision = self._revision_map[resolved_id]
except KeyError:
# break out to avoid misleading py3k stack traces
revision = False
revs: Sequence[str]
if revision is False:
assert resolved_id
# do a partial lookup
revs = [
x
for x in self._revision_map
if x and len(x) > 3 and x.startswith(resolved_id)
]
if branch_rev:
revs = self.filter_for_lineage(revs, check_branch)
if not revs:
raise ResolutionError(
"No such revision or branch '%s'%s"
% (
resolved_id,
(
"; please ensure at least four characters are "
"present for partial revision identifier matches"
if len(resolved_id) < 4
else ""
),
),
resolved_id,
)
elif len(revs) > 1:
raise ResolutionError(
"Multiple revisions start "
"with '%s': %s..."
% (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])),
resolved_id,
)
else:
revision = self._revision_map[revs[0]]
if check_branch and revision is not None:
assert branch_rev is not None
assert resolved_id
if not self._shares_lineage(
revision.revision, branch_rev.revision
):
raise ResolutionError(
"Revision %s is not a member of branch '%s'"
% (revision.revision, check_branch),
resolved_id,
)
return revision
def _filter_into_branch_heads(
self, targets: Iterable[Optional[_RevisionOrBase]]
) -> Set[Optional[_RevisionOrBase]]:
targets = set(targets)
for rev in list(targets):
assert rev
if targets.intersection(
self._get_descendant_nodes([rev], include_dependencies=False)
).difference([rev]):
targets.discard(rev)
return targets
def filter_for_lineage(
self,
targets: Iterable[_TR],
check_against: Optional[str],
include_dependencies: bool = False,
) -> Tuple[_TR, ...]:
id_, branch_label = self._resolve_revision_number(check_against)
shares = []
if branch_label:
shares.append(branch_label)
if id_:
shares.extend(id_)
return tuple(
tg
for tg in targets
if self._shares_lineage(
tg, shares, include_dependencies=include_dependencies
)
)
def _shares_lineage(
self,
target: Optional[_RevisionOrStr],
test_against_revs: Sequence[_RevisionOrStr],
include_dependencies: bool = False,
) -> bool:
if not test_against_revs:
return True
if not isinstance(target, Revision):
resolved_target = not_none(self._revision_for_ident(target))
else:
resolved_target = target
resolved_test_against_revs = [
(
self._revision_for_ident(test_against_rev)
if not isinstance(test_against_rev, Revision)
else test_against_rev
)
for test_against_rev in util.to_tuple(
test_against_revs, default=()
)
]
return bool(
set(
self._get_descendant_nodes(
[resolved_target],
include_dependencies=include_dependencies,
)
)
.union(
self._get_ancestor_nodes(
[resolved_target],
include_dependencies=include_dependencies,
)
)
.intersection(resolved_test_against_revs)
)
def _resolve_revision_number(
self, id_: Optional[_GetRevArg]
) -> Tuple[Tuple[str, ...], Optional[str]]:
branch_label: Optional[str]
if isinstance(id_, str) and "@" in id_:
branch_label, id_ = id_.split("@", 1)
elif id_ is not None and (
(isinstance(id_, tuple) and id_ and not isinstance(id_[0], str))
or not isinstance(id_, (str, tuple))
):
raise RevisionError(
"revision identifier %r is not a string; ensure database "
"driver settings are correct" % (id_,)
)
else:
branch_label = None
# ensure map is loaded
self._revision_map
if id_ == "heads":
if branch_label:
return (
self.filter_for_lineage(self.heads, branch_label),
branch_label,
)
else:
return self._real_heads, branch_label
elif id_ == "head":
current_head = self.get_current_head(branch_label)
if current_head:
return (current_head,), branch_label
else:
return (), branch_label
elif id_ == "base" or id_ is None:
return (), branch_label
else:
return util.to_tuple(id_, default=None), branch_label
def iterate_revisions(
self,
upper: _RevisionIdentifierType,
lower: _RevisionIdentifierType,
implicit_base: bool = False,
inclusive: bool = False,
assert_relative_length: bool = True,
select_for_downgrade: bool = False,
) -> Iterator[Revision]:
"""Iterate through script revisions, starting at the given
upper revision identifier and ending at the lower.
The traversal uses strictly the `down_revision`
marker inside each migration script, so
it is a requirement that upper >= lower,
else you'll get nothing back.
The iterator yields :class:`.Revision` objects.
"""
fn: _CollectRevisionsProtocol
if select_for_downgrade:
fn = self._collect_downgrade_revisions
else:
fn = self._collect_upgrade_revisions
revisions, heads = fn(
upper,
lower,
inclusive=inclusive,
implicit_base=implicit_base,
assert_relative_length=assert_relative_length,
)
for node in self._topological_sort(revisions, heads):
yield not_none(self.get_revision(node))
def _get_descendant_nodes(
self,
targets: Collection[Optional[_RevisionOrBase]],
map_: Optional[_RevisionMapType] = None,
check: bool = False,
omit_immediate_dependencies: bool = False,
include_dependencies: bool = True,
) -> Iterator[Any]:
if omit_immediate_dependencies:
def fn(rev: Revision) -> Iterable[str]:
if rev not in targets:
return rev._all_nextrev
else:
return rev.nextrev
elif include_dependencies:
def fn(rev: Revision) -> Iterable[str]:
return rev._all_nextrev
else:
def fn(rev: Revision) -> Iterable[str]:
return rev.nextrev
return self._iterate_related_revisions(
fn, targets, map_=map_, check=check
)
def _get_ancestor_nodes(
self,
targets: Collection[Optional[_RevisionOrBase]],
map_: Optional[_RevisionMapType] = None,
check: bool = False,
include_dependencies: bool = True,
) -> Iterator[Revision]:
if include_dependencies:
def fn(rev: Revision) -> Iterable[str]:
return rev._normalized_down_revisions
else:
def fn(rev: Revision) -> Iterable[str]:
return rev._versioned_down_revisions
return self._iterate_related_revisions(
fn, targets, map_=map_, check=check
)
def _iterate_related_revisions(
self,
fn: Callable[[Revision], Iterable[str]],
targets: Collection[Optional[_RevisionOrBase]],
map_: Optional[_RevisionMapType],
check: bool = False,
) -> Iterator[Revision]:
if map_ is None:
map_ = self._revision_map
seen = set()
todo: Deque[Revision] = collections.deque()
for target_for in targets:
target = is_revision(target_for)
todo.append(target)
if check:
per_target = set()
while todo:
rev = todo.pop()
if check:
per_target.add(rev)
if rev in seen:
continue
seen.add(rev)
# Check for map errors before collecting.
for rev_id in fn(rev):
next_rev = map_[rev_id]
assert next_rev is not None
if next_rev.revision != rev_id:
raise RevisionError(
"Dependency resolution failed; broken map"
)
todo.append(next_rev)
yield rev
if check:
overlaps = per_target.intersection(targets).difference(
[target]
)
if overlaps:
raise RevisionError(
"Requested revision %s overlaps with "
"other requested revisions %s"
% (
target.revision,
", ".join(r.revision for r in overlaps),
)
)
def _topological_sort(
self,
revisions: Collection[Revision],
heads: Any,
) -> List[str]:
"""Yield revision ids of a collection of Revision objects in
topological sorted order (i.e. revisions always come after their
down_revisions and dependencies). Uses the order of keys in
_revision_map to sort.
"""
id_to_rev = self._revision_map
def get_ancestors(rev_id: str) -> Set[str]:
return {
r.revision
for r in self._get_ancestor_nodes([id_to_rev[rev_id]])
}
todo = {d.revision for d in revisions}
# Use revision map (ordered dict) key order to pre-sort.
inserted_order = list(self._revision_map)
current_heads = list(
sorted(
{d.revision for d in heads if d.revision in todo},
key=inserted_order.index,
)
)
ancestors_by_idx = [get_ancestors(rev_id) for rev_id in current_heads]
output = []
current_candidate_idx = 0
while current_heads:
candidate = current_heads[current_candidate_idx]
for check_head_index, ancestors in enumerate(ancestors_by_idx):
# scan all the heads. see if we can continue walking
# down the current branch indicated by current_candidate_idx.
if (
check_head_index != current_candidate_idx
and candidate in ancestors
):
current_candidate_idx = check_head_index
# nope, another head is dependent on us, they have
# to be traversed first
break
else:
# yup, we can emit
if candidate in todo:
output.append(candidate)
todo.remove(candidate)
# now update the heads with our ancestors.
candidate_rev = id_to_rev[candidate]
assert candidate_rev is not None
heads_to_add = [
r
for r in candidate_rev._normalized_down_revisions
if r in todo and r not in current_heads
]
if not heads_to_add:
# no ancestors, so remove this head from the list
del current_heads[current_candidate_idx]
del ancestors_by_idx[current_candidate_idx]
current_candidate_idx = max(current_candidate_idx - 1, 0)
else:
if (
not candidate_rev._normalized_resolved_dependencies
and len(candidate_rev._versioned_down_revisions) == 1
):
current_heads[current_candidate_idx] = heads_to_add[0]
# for plain movement down a revision line without
# any mergepoints, branchpoints, or deps, we
# can update the ancestors collection directly
# by popping out the candidate we just emitted
ancestors_by_idx[current_candidate_idx].discard(
candidate
)
else:
# otherwise recalculate the ancestor set from scratch;
# this could be improved to avoid recomputing the whole
# ancestor collection each time, but the bookkeeping
# gets complicated
current_heads[current_candidate_idx] = heads_to_add[0]
current_heads.extend(heads_to_add[1:])
ancestors_by_idx[current_candidate_idx] = (
get_ancestors(heads_to_add[0])
)
ancestors_by_idx.extend(
get_ancestors(head) for head in heads_to_add[1:]
)
assert not todo
return output
def _walk(
self,
start: Optional[Union[str, Revision]],
steps: int,
branch_label: Optional[str] = None,
no_overwalk: bool = True,
) -> Optional[_RevisionOrBase]:
"""
Walk the requested number of :steps up (steps > 0) or down (steps < 0)
the revision tree.
:branch_label is used to select branches only when walking up.
If the walk goes past the boundaries of the tree and :no_overwalk is
True, None is returned, otherwise the walk terminates early.
A RevisionError is raised if there is no unambiguous revision to
walk to.
"""
initial: Optional[_RevisionOrBase]
if isinstance(start, str):
initial = self.get_revision(start)
else:
initial = start
children: Sequence[Optional[_RevisionOrBase]]
for _ in range(abs(steps)):
if steps > 0:
assert initial != "base" # type: ignore[comparison-overlap]
# Walk up
walk_up = [
is_revision(rev)
for rev in self.get_revisions(
self.bases if initial is None else initial.nextrev
)
]
if branch_label:
children = self.filter_for_lineage(walk_up, branch_label)
else:
children = walk_up
else:
# Walk down
if initial == "base": # type: ignore[comparison-overlap]
children = ()
else:
children = self.get_revisions(
self.heads
if initial is None
else initial.down_revision
)
if not children:
children = ("base",)
if not children:
# This will return an invalid result if no_overwalk, otherwise
# further steps will stay where we are.
ret = None if no_overwalk else initial
return ret
elif len(children) > 1:
raise RevisionError("Ambiguous walk")
initial = children[0]
return initial
def _parse_downgrade_target(
self,
current_revisions: _RevisionIdentifierType,
target: _RevisionIdentifierType,
assert_relative_length: bool,
) -> Tuple[Optional[str], Optional[_RevisionOrBase]]:
"""
Parse downgrade command syntax :target to retrieve the target revision
and branch label (if any) given the :current_revisions stamp of the
database.
Returns a tuple (branch_label, target_revision) where branch_label
is a string from the command specifying the branch to consider (or
None if no branch given), and target_revision is a Revision object
which the command refers to. target_revision is None if the command
refers to 'base'. The target may be specified in absolute form, or
relative to :current_revisions.
"""
if target is None:
return None, None
assert isinstance(
target, str
), "Expected downgrade target in string form"
match = _relative_destination.match(target)
if match:
branch_label, symbol, relative = match.groups()
rel_int = int(relative)
if rel_int >= 0:
if symbol is None:
# Downgrading to current + n is not valid.
raise RevisionError(
"Relative revision %s didn't "
"produce %d migrations" % (relative, abs(rel_int))
)
# Find target revision relative to given symbol.
rev = self._walk(
symbol,
rel_int,
branch_label,
no_overwalk=assert_relative_length,
)
if rev is None:
raise RevisionError("Walked too far")
return branch_label, rev
else:
relative_revision = symbol is None
if relative_revision:
# Find target revision relative to current state.
if branch_label:
cr_tuple = util.to_tuple(current_revisions)
symbol_list: Sequence[str]
symbol_list = self.filter_for_lineage(
cr_tuple, branch_label
)
if not symbol_list:
# check the case where there are multiple branches
# but currently a single head, since all other
# branch heads are dependent on the current
# single head.
all_current = cast(
Set[Revision], self._get_all_current(cr_tuple)
)
sl_all_current = self.filter_for_lineage(
all_current, branch_label
)
symbol_list = [
r.revision if r else r # type: ignore[misc]
for r in sl_all_current
]
assert len(symbol_list) == 1
symbol = symbol_list[0]
else:
current_revisions = util.to_tuple(current_revisions)
if not current_revisions:
raise RevisionError(
"Relative revision %s didn't "
"produce %d migrations"
% (relative, abs(rel_int))
)
# Have to check uniques here for duplicate rows test.
if len(set(current_revisions)) > 1:
util.warn(
"downgrade -1 from multiple heads is "
"ambiguous; "
"this usage will be disallowed in a future "
"release."
)
symbol = current_revisions[0]
# Restrict iteration to just the selected branch when
# ambiguous branches are involved.
branch_label = symbol
# Walk down the tree to find downgrade target.
rev = self._walk(
start=(
self.get_revision(symbol)
if branch_label is None
else self.get_revision(
"%s@%s" % (branch_label, symbol)
)
),
steps=rel_int,
no_overwalk=assert_relative_length,
)
if rev is None:
if relative_revision:
raise RevisionError(
"Relative revision %s didn't "
"produce %d migrations" % (relative, abs(rel_int))
)
else:
raise RevisionError("Walked too far")
return branch_label, rev
# No relative destination given, revision specified is absolute.
branch_label, _, symbol = target.rpartition("@")
if not branch_label:
branch_label = None
return branch_label, self.get_revision(symbol)
def _parse_upgrade_target(
self,
current_revisions: _RevisionIdentifierType,
target: _RevisionIdentifierType,
assert_relative_length: bool,
) -> Tuple[Optional[_RevisionOrBase], ...]:
"""
Parse upgrade command syntax :target to retrieve the target revision,
given the :current_revisions stamp of the database.
Returns a tuple of Revision objects which should be iterated/upgraded
to. The target may be specified in absolute form, or relative to
:current_revisions.
"""
if isinstance(target, str):
match = _relative_destination.match(target)
else:
match = None
if not match:
# No relative destination, target is absolute.
return self.get_revisions(target)
current_revisions_tup: Union[str, Tuple[Optional[str], ...], None]
current_revisions_tup = util.to_tuple(current_revisions)
branch_label, symbol, relative_str = match.groups()
relative = int(relative_str)
if relative > 0:
if symbol is None:
if not current_revisions_tup:
current_revisions_tup = (None,)
# Try to filter to a single target (avoid ambiguous branches).
start_revs = current_revisions_tup
if branch_label:
start_revs = self.filter_for_lineage(
self.get_revisions(current_revisions_tup), # type: ignore[arg-type] # noqa: E501
branch_label,
)
if not start_revs:
# The requested branch is not a head, so we need to
# backtrack to find a branchpoint.
active_on_branch = self.filter_for_lineage(
self._get_ancestor_nodes(
self.get_revisions(current_revisions_tup)
),
branch_label,
)
# Find the tips of this set of revisions (revisions
# without children within the set).
start_revs = tuple(
{rev.revision for rev in active_on_branch}
- {
down
for rev in active_on_branch
for down in rev._normalized_down_revisions
}
)
if not start_revs:
# We must need to go right back to base to find
# a starting point for this branch.
start_revs = (None,)
if len(start_revs) > 1:
raise RevisionError(
"Ambiguous upgrade from multiple current revisions"
)
# Walk up from unique target revision.
rev = self._walk(
start=start_revs[0],
steps=relative,
branch_label=branch_label,
no_overwalk=assert_relative_length,
)
if rev is None:
raise RevisionError(
"Relative revision %s didn't "
"produce %d migrations" % (relative_str, abs(relative))
)
return (rev,)
else:
# Walk is relative to a given revision, not the current state.
return (
self._walk(
start=self.get_revision(symbol),
steps=relative,
branch_label=branch_label,
no_overwalk=assert_relative_length,
),
)
else:
if symbol is None:
# Upgrading to current - n is not valid.
raise RevisionError(
"Relative revision %s didn't "
"produce %d migrations" % (relative, abs(relative))
)
return (
self._walk(
start=(
self.get_revision(symbol)
if branch_label is None
else self.get_revision(
"%s@%s" % (branch_label, symbol)
)
),
steps=relative,
no_overwalk=assert_relative_length,
),
)
def _collect_downgrade_revisions(
self,
upper: _RevisionIdentifierType,
lower: _RevisionIdentifierType,
inclusive: bool,
implicit_base: bool,
assert_relative_length: bool,
) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]:
"""
Compute the set of current revisions specified by :upper, and the
downgrade target specified by :lower. Return all dependents of the target
which are currently active.
:inclusive=True includes the target revision in the set
"""
branch_label, target_revision = self._parse_downgrade_target(
current_revisions=upper,
target=lower,
assert_relative_length=assert_relative_length,
)
if target_revision == "base":
target_revision = None
assert target_revision is None or isinstance(target_revision, Revision)
roots: List[Revision]
# Find candidates to drop.
if target_revision is None:
# Downgrading back to base: find all tree roots.
roots = [
rev
for rev in self._revision_map.values()
if rev is not None and rev.down_revision is None
]
elif inclusive:
# inclusive implies target revision should also be dropped
roots = [target_revision]
else:
# Downgrading to fixed target: find all direct children.
roots = [
is_revision(rev)
for rev in self.get_revisions(target_revision.nextrev)
]
if branch_label and len(roots) > 1:
# Need to filter roots.
ancestors = {
rev.revision
for rev in self._get_ancestor_nodes(
[self._resolve_branch(branch_label)],
include_dependencies=False,
)
}
# Intersection gives the root revisions we are trying to
# rollback with the downgrade.
roots = [
is_revision(rev)
for rev in self.get_revisions(
{rev.revision for rev in roots}.intersection(ancestors)
)
]
# Ensure we didn't throw everything away when filtering branches.
if len(roots) == 0:
raise RevisionError(
"Not a valid downgrade target from current heads"
)
heads = self.get_revisions(upper)
# Aim is to drop :branch_revision; to do so we also need to drop its
# descendants and anything dependent on it.
downgrade_revisions = set(
self._get_descendant_nodes(
roots,
include_dependencies=True,
omit_immediate_dependencies=False,
)
)
active_revisions = set(
self._get_ancestor_nodes(heads, include_dependencies=True)
)
# Emit revisions to drop in reverse topological sorted order.
downgrade_revisions.intersection_update(active_revisions)
if implicit_base:
# Wind other branches back to base.
downgrade_revisions.update(
active_revisions.difference(self._get_ancestor_nodes(roots))
)
if (
target_revision is not None
and not downgrade_revisions
and target_revision not in heads
):
# Empty intersection: target revs are not present.
raise RangeNotAncestorError("Nothing to drop", upper)
return downgrade_revisions, heads
def _collect_upgrade_revisions(
self,
upper: _RevisionIdentifierType,
lower: _RevisionIdentifierType,
inclusive: bool,
implicit_base: bool,
assert_relative_length: bool,
) -> Tuple[Set[Revision], Tuple[Revision, ...]]:
"""
Compute the set of required revisions specified by :upper, and the
current set of active revisions specified by :lower. Find the
difference between the two to compute the required upgrades.
:inclusive=True includes the current/lower revisions in the set
:implicit_base=False only returns revisions which are downstream
of the current/lower revisions. Dependencies from branches with
different bases will not be included.
"""
targets: Collection[Revision] = [
is_revision(rev)
for rev in self._parse_upgrade_target(
current_revisions=lower,
target=upper,
assert_relative_length=assert_relative_length,
)
]
# assert type(targets) is tuple, "targets should be a tuple"
# Handle named bases (e.g. branch@... -> heads should only produce
# targets on the given branch)
if isinstance(lower, str) and "@" in lower:
branch, _, _ = lower.partition("@")
branch_rev = self.get_revision(branch)
if branch_rev is not None and branch_rev.revision == branch:
# A revision was used as a label; get its branch instead
assert len(branch_rev.branch_labels) == 1
branch = next(iter(branch_rev.branch_labels))
targets = {
need for need in targets if branch in need.branch_labels
}
required_node_set = set(
self._get_ancestor_nodes(
targets, check=True, include_dependencies=True
)
).union(targets)
current_revisions = self.get_revisions(lower)
if not implicit_base and any(
rev not in required_node_set
for rev in current_revisions
if rev is not None
):
raise RangeNotAncestorError(lower, upper)
assert (
type(current_revisions) is tuple
), "current_revisions should be a tuple"
# Special case where lower = a relative value (get_revisions can't
# find it)
if current_revisions and current_revisions[0] is None:
_, rev = self._parse_downgrade_target(
current_revisions=upper,
target=lower,
assert_relative_length=assert_relative_length,
)
assert rev
if rev == "base":
current_revisions = tuple()
lower = None
else:
current_revisions = (rev,)
lower = rev.revision
current_node_set = set(
self._get_ancestor_nodes(
current_revisions, check=True, include_dependencies=True
)
).union(current_revisions)
needs = required_node_set.difference(current_node_set)
# Include the lower revision (=current_revisions?) in the iteration
if inclusive:
needs.update(is_revision(rev) for rev in self.get_revisions(lower))
# By default, base is implicit as we want all dependencies returned.
# Base is also implicit if lower = base
# implicit_base=False -> only return direct downstreams of
# current_revisions
if current_revisions and not implicit_base:
lower_descendents = self._get_descendant_nodes(
[is_revision(rev) for rev in current_revisions],
check=True,
include_dependencies=False,
)
needs.intersection_update(lower_descendents)
return needs, tuple(targets)
def _get_all_current(
self, id_: Tuple[str, ...]
) -> Set[Optional[_RevisionOrBase]]:
top_revs: Set[Optional[_RevisionOrBase]]
top_revs = set(self.get_revisions(id_))
top_revs.update(
self._get_ancestor_nodes(list(top_revs), include_dependencies=True)
)
return self._filter_into_branch_heads(top_revs)
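# Illustrative sketch (not part of the library): building a tiny linear
# map of three revisions (ids are made up) and traversing it with the
# methods above.  Assumes the Revision class defined below.
def _demo_linear_map() -> None:
    revs = [
        Revision("a1", None),
        Revision("b2", "a1"),
        Revision("c3", "b2"),
    ]
    rmap = RevisionMap(lambda: revs)
    assert rmap.get_current_head() == "c3"
    # walking two steps up from the base lands on the head
    walked = rmap._walk("a1", steps=2)
    assert walked is not None and walked.revision == "c3"
    # upgrade traversal yields upper-to-lower, topologically sorted
    order = [r.revision for r in rmap.iterate_revisions("c3", "a1")]
    assert order == ["c3", "b2"]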
class Revision:
"""Base class for revisioned objects.
The :class:`.Revision` class is the base of the more public-facing
:class:`.Script` object, which represents a migration script.
The mechanics of revision management and traversal are encapsulated
within :class:`.Revision`, while :class:`.Script` applies this logic
to Python files in a version directory.
"""
nextrev: FrozenSet[str] = frozenset()
"""following revisions, based on down_revision only."""
_all_nextrev: FrozenSet[str] = frozenset()
revision: str = None # type: ignore[assignment]
"""The string revision number."""
down_revision: Optional[_RevIdType] = None
"""The ``down_revision`` identifier(s) within the migration script.
Note that the total set of "down" revisions is
down_revision + dependencies.
"""
dependencies: Optional[_RevIdType] = None
"""Additional revisions which this revision is dependent on.
From a migration standpoint, these dependencies are added to the
down_revision to form the full iteration. However, the separation
of down_revision from "dependencies" is to assist in navigating
a history that contains many branches, typically a multi-root scenario.
"""
branch_labels: Set[str] = None # type: ignore[assignment]
"""Optional string/tuple of symbolic names to apply to this
revision's branch"""
_resolved_dependencies: Tuple[str, ...]
_normalized_resolved_dependencies: Tuple[str, ...]
@classmethod
def verify_rev_id(cls, revision: str) -> None:
illegal_chars = set(revision).intersection(_revision_illegal_chars)
if illegal_chars:
raise RevisionError(
"Character(s) '%s' not allowed in revision identifier '%s'"
% (", ".join(sorted(illegal_chars)), revision)
)
def __init__(
self,
revision: str,
down_revision: Optional[Union[str, Tuple[str, ...]]],
dependencies: Optional[Union[str, Tuple[str, ...]]] = None,
branch_labels: Optional[Union[str, Tuple[str, ...]]] = None,
) -> None:
if down_revision and revision in util.to_tuple(down_revision):
raise LoopDetected(revision)
elif dependencies is not None and revision in util.to_tuple(
dependencies
):
raise DependencyLoopDetected(revision)
self.verify_rev_id(revision)
self.revision = revision
self.down_revision = tuple_rev_as_scalar(util.to_tuple(down_revision))
self.dependencies = tuple_rev_as_scalar(util.to_tuple(dependencies))
self._orig_branch_labels = util.to_tuple(branch_labels, default=())
self.branch_labels = set(self._orig_branch_labels)
def __repr__(self) -> str:
args = [repr(self.revision), repr(self.down_revision)]
if self.dependencies:
args.append("dependencies=%r" % (self.dependencies,))
if self.branch_labels:
args.append("branch_labels=%r" % (self.branch_labels,))
return "%s(%s)" % (self.__class__.__name__, ", ".join(args))
def add_nextrev(self, revision: Revision) -> None:
self._all_nextrev = self._all_nextrev.union([revision.revision])
if self.revision in revision._versioned_down_revisions:
self.nextrev = self.nextrev.union([revision.revision])
@property
def _all_down_revisions(self) -> Tuple[str, ...]:
return util.dedupe_tuple(
util.to_tuple(self.down_revision, default=())
+ self._resolved_dependencies
)
@property
def _normalized_down_revisions(self) -> Tuple[str, ...]:
"""return immediate down revisions for a rev, omitting dependencies
that are still dependencies of ancestors.
"""
return util.dedupe_tuple(
util.to_tuple(self.down_revision, default=())
+ self._normalized_resolved_dependencies
)
@property
def _versioned_down_revisions(self) -> Tuple[str, ...]:
return util.to_tuple(self.down_revision, default=())
@property
def is_head(self) -> bool:
"""Return True if this :class:`.Revision` is a 'head' revision.
This is determined based on whether any other :class:`.Script`
within the :class:`.ScriptDirectory` refers to this
:class:`.Script`. Multiple heads can be present.
"""
return not bool(self.nextrev)
@property
def _is_real_head(self) -> bool:
return not bool(self._all_nextrev)
@property
def is_base(self) -> bool:
"""Return True if this :class:`.Revision` is a 'base' revision."""
return self.down_revision is None
@property
def _is_real_base(self) -> bool:
"""Return True if this :class:`.Revision` is a "real" base revision,
e.g. that it has no dependencies either."""
# we use self.dependencies here because this is called
# during initialization, where _resolved_dependencies isn't
# set up yet
return self.down_revision is None and self.dependencies is None
@property
def is_branch_point(self) -> bool:
"""Return True if this :class:`.Script` is a branch point.
A branchpoint is defined as a :class:`.Script` which is referred
to by more than one succeeding :class:`.Script`, that is more
than one :class:`.Script` has a `down_revision` identifier pointing
here.
"""
return len(self.nextrev) > 1
@property
def _is_real_branch_point(self) -> bool:
"""Return True if this :class:`.Script` is a 'real' branch point,
taking into account dependencies as well.
"""
return len(self._all_nextrev) > 1
@property
def is_merge_point(self) -> bool:
"""Return True if this :class:`.Script` is a merge point."""
return len(self._versioned_down_revisions) > 1
@overload
def tuple_rev_as_scalar(rev: None) -> None: ...
@overload
def tuple_rev_as_scalar(
rev: Union[Tuple[_T, ...], List[_T]]
) -> Union[_T, Tuple[_T, ...], List[_T]]: ...
def tuple_rev_as_scalar(
rev: Optional[Sequence[_T]],
) -> Union[_T, Sequence[_T], None]:
if not rev:
return None
elif len(rev) == 1:
return rev[0]
else:
return rev
def is_revision(rev: Any) -> Revision:
assert isinstance(rev, Revision)
return rev
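# Illustrative sketch (not part of the library): what the relative-
# destination regex above extracts from command-line style targets.
def _demo_relative_destination() -> None:
    match = _relative_destination.match("feature@head+2")
    assert match is not None
    # (branch label, symbol, relative step)
    assert match.groups() == ("feature", "head", "+2")
    # a bare relative step has neither branch label nor symbol
    bare = _relative_destination.match("-1")
    assert bare is not None and bare.groups() == (None, None, "-1")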
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import shlex
import subprocess
import sys
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Union
from .. import util
from ..util import compat
REVISION_SCRIPT_TOKEN = "REVISION_SCRIPT_FILENAME"
_registry: dict = {}
def register(name: str) -> Callable:
"""A function decorator that will register that function as a write hook.
See the documentation linked below for an example.
.. seealso::
:ref:`post_write_hooks_custom`
"""
def decorate(fn):
_registry[name] = fn
return fn
return decorate
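# Illustrative sketch (not part of the library): registering a custom
# hook with the decorator above.  The name "noop" and the body are
# hypothetical; a hook receives the path of the generated revision
# script plus the options parsed from its config section.
@register("noop")
def _noop_hook(path: str, options: dict) -> None:
    # a real hook would rewrite the script at ``path`` in place
    print(f"post-processing {path} via hook {options.get('_hook_name')!r}")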
def _invoke(
name: str, revision: str, options: Mapping[str, Union[str, int]]
) -> Any:
"""Invokes the formatter registered for the given name.
:param name: The name of a formatter in the registry
:param revision: A :class:`.MigrationRevision` instance
:param options: A dict containing kwargs passed to the
specified formatter.
:raises: :class:`alembic.util.CommandError`
"""
try:
hook = _registry[name]
except KeyError as ke:
raise util.CommandError(
f"No formatter with name '{name}' registered"
) from ke
else:
return hook(revision, options)
def _run_hooks(path: str, hook_config: Mapping[str, str]) -> None:
"""Invoke hooks for a generated revision."""
from .base import _split_on_space_comma
names = _split_on_space_comma.split(hook_config.get("hooks", ""))
for name in names:
if not name:
continue
opts = {
key[len(name) + 1 :]: hook_config[key]
for key in hook_config
if key.startswith(name + ".")
}
opts["_hook_name"] = name
try:
type_ = opts["type"]
except KeyError as ke:
raise util.CommandError(
f"Key {name}.type is required for post write hook {name!r}"
) from ke
else:
with util.status(
f"Running post write hook {name!r}", newline=True
):
_invoke(type_, path, opts)
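# For reference, the hook section layout that _run_hooks() above parses
# (values are illustrative; "black" is just an example formatter):
#
#   [post_write_hooks]
#   hooks = black
#   black.type = console_scripts
#   black.entrypoint = black
#   black.options = -l 79 REVISION_SCRIPT_FILENAME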
def _parse_cmdline_options(cmdline_options_str: str, path: str) -> List[str]:
"""Parse options from a string into a list.
Also substitutes the revision script token with the actual filename of
the revision script.
If the revision script token doesn't occur in the options string, it is
automatically prepended.
"""
if REVISION_SCRIPT_TOKEN not in cmdline_options_str:
cmdline_options_str = REVISION_SCRIPT_TOKEN + " " + cmdline_options_str
cmdline_options_list = shlex.split(
cmdline_options_str, posix=compat.is_posix
)
cmdline_options_list = [
option.replace(REVISION_SCRIPT_TOKEN, path)
for option in cmdline_options_list
]
return cmdline_options_list
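# Illustrative sketch (not part of the library): token handling in
# _parse_cmdline_options above, assuming a POSIX platform.
def _demo_parse_cmdline_options() -> None:
    opts = _parse_cmdline_options("-l 79", "/tmp/rev_abc.py")
    # the token was absent, so it is prepended and then substituted
    assert opts == ["/tmp/rev_abc.py", "-l", "79"]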
@register("console_scripts")
def console_scripts(
path: str, options: dict, ignore_output: bool = False
) -> None:
try:
entrypoint_name = options["entrypoint"]
except KeyError as ke:
raise util.CommandError(
f"Key {options['_hook_name']}.entrypoint is required for post "
f"write hook {options['_hook_name']!r}"
) from ke
for entry in compat.importlib_metadata_get("console_scripts"):
if entry.name == entrypoint_name:
impl: Any = entry
break
else:
raise util.CommandError(
f"Could not find entrypoint console_scripts.{entrypoint_name}"
)
cwd: Optional[str] = options.get("cwd", None)
cmdline_options_str = options.get("options", "")
cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path)
kw: Dict[str, Any] = {}
if ignore_output:
kw["stdout"] = kw["stderr"] = subprocess.DEVNULL
subprocess.run(
[
sys.executable,
"-c",
f"import {impl.module}; {impl.module}.{impl.attr}()",
]
+ cmdline_options_list,
cwd=cwd,
**kw,
)
@register("exec")
def exec_(path: str, options: dict, ignore_output: bool = False) -> None:
try:
executable = options["executable"]
except KeyError as ke:
raise util.CommandError(
f"Key {options['_hook_name']}.executable is required for post "
f"write hook {options['_hook_name']!r}"
) from ke
cwd: Optional[str] = options.get("cwd", None)
cmdline_options_str = options.get("options", "")
cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path)
kw: Dict[str, Any] = {}
if ignore_output:
kw["stdout"] = kw["stderr"] = subprocess.DEVNULL
subprocess.run(
[
executable,
*cmdline_options_list,
],
cwd=cwd,
**kw,
)
|
from .base import Script
from .base import ScriptDirectory
__all__ = ["ScriptDirectory", "Script"]
|
import asyncio
from logging.config import fileConfig
from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This sets up the loggers defined in the config file.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def do_run_migrations(connection: Connection) -> None:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
async def run_async_migrations() -> None:
"""In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = async_engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
async with connectable.connect() as connection:
await connection.run_sync(do_run_migrations)
await connectable.dispose()
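# Note on this asyncio template (the driver and URL below are example
# values, not requirements): async_engine_from_config() needs an
# async-capable DBAPI, so the sqlalchemy.url in alembic.ini must name
# one, e.g.
#
#   sqlalchemy.url = postgresql+asyncpg://user:pass@localhost/dbname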
def run_migrations_online() -> None:
"""Run migrations in 'online' mode."""
asyncio.run(run_async_migrations())
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This sets up the loggers defined in the config file.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
import logging
from logging.config import fileConfig
import re
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
USE_TWOPHASE = False
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This sets up the loggers defined in the config file.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
logger = logging.getLogger("alembic.env")
# gather section names referring to different
# databases. These are named "engine1", "engine2"
# in the sample .ini file.
db_names = config.get_main_option("databases", "")
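# e.g. (illustrative values only):
#
#   [alembic]
#   databases = engine1, engine2
#
#   [engine1]
#   sqlalchemy.url = driver://user:pass@localhost/db1
#
#   [engine2]
#   sqlalchemy.url = driver://user:pass@localhost/db2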
# add your model's MetaData objects here
# for 'autogenerate' support. These must be set
# up to hold just those tables targeting a
# particular database. table.tometadata() may be
# helpful here in case a "copy" of
# a MetaData is needed.
# from myapp import mymodel
# target_metadata = {
# 'engine1':mymodel.metadata1,
# 'engine2':mymodel.metadata2
# }
target_metadata = {}
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
# for the --sql use case, run migrations for each URL into
# individual files.
engines = {}
for name in re.split(r",\s*", db_names):
engines[name] = rec = {}
rec["url"] = context.config.get_section_option(name, "sqlalchemy.url")
for name, rec in engines.items():
logger.info("Migrating database %s" % name)
file_ = "%s.sql" % name
logger.info("Writing output to %s" % file_)
with open(file_, "w") as buffer:
context.configure(
url=rec["url"],
output_buffer=buffer,
target_metadata=target_metadata.get(name),
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations(engine_name=name)
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# for the direct-to-DB use case, start a transaction on all
# engines, then run all migrations, then commit all transactions.
engines = {}
for name in re.split(r",\s*", db_names):
engines[name] = rec = {}
rec["engine"] = engine_from_config(
context.config.get_section(name, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
for name, rec in engines.items():
engine = rec["engine"]
rec["connection"] = conn = engine.connect()
if USE_TWOPHASE:
rec["transaction"] = conn.begin_twophase()
else:
rec["transaction"] = conn.begin()
try:
for name, rec in engines.items():
logger.info("Migrating database %s" % name)
context.configure(
connection=rec["connection"],
upgrade_token="%s_upgrades" % name,
downgrade_token="%s_downgrades" % name,
target_metadata=target_metadata.get(name),
)
context.run_migrations(engine_name=name)
if USE_TWOPHASE:
for rec in engines.values():
rec["transaction"].prepare()
for rec in engines.values():
rec["transaction"].commit()
except:
for rec in engines.values():
rec["transaction"].rollback()
raise
finally:
for rec in engines.values():
rec["connection"].close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
from __future__ import annotations
import contextlib
import re
import sys
from typing import Any
from typing import Dict
from sqlalchemy import exc as sa_exc
from sqlalchemy.engine import default
from sqlalchemy.engine import URL
from sqlalchemy.testing.assertions import _expect_warnings
from sqlalchemy.testing.assertions import eq_ # noqa
from sqlalchemy.testing.assertions import is_ # noqa
from sqlalchemy.testing.assertions import is_false # noqa
from sqlalchemy.testing.assertions import is_not_ # noqa
from sqlalchemy.testing.assertions import is_true # noqa
from sqlalchemy.testing.assertions import ne_ # noqa
from sqlalchemy.util import decorator
def _assert_proper_exception_context(exception):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
Python 3 will report nested exceptions as "during the handling of
error X, error Y occurred". That's not what we want here; we want
these exceptions in a cause chain.
"""
if (
exception.__context__ is not exception.__cause__
and not exception.__suppress_context__
):
assert False, (
"Exception %r was correctly raised but did not set a cause, "
"within context %r as its cause."
% (exception, exception.__context__)
)
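# Hedged illustration of the chaining pattern this helper enforces:
#
#     try:
#         connection.execute(stmt)
#     except DBAPIError as err:
#         raise CommandError("migration failed") from err
#
# Re-raising without "from err" leaves __context__ set but __cause__
# empty, which the assertion above rejects. (DBAPIError / CommandError
# are illustrative names only.)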
def assert_raises(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw, check_context=True)
def assert_raises_context_ok(except_cls, callable_, *args, **kw):
return _assert_raises(except_cls, callable_, args, kw)
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
return _assert_raises(
except_cls, callable_, args, kwargs, msg=msg, check_context=True
)
def assert_raises_message_context_ok(
except_cls, msg, callable_, *args, **kwargs
):
return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
def _assert_raises(
except_cls, callable_, args, kwargs, msg=None, check_context=False
):
with _expect_raises(except_cls, msg, check_context) as ec:
callable_(*args, **kwargs)
return ec.error
class _ErrorContainer:
error: Any = None
@contextlib.contextmanager
def _expect_raises(
except_cls, msg=None, check_context=False, text_exact=False
):
ec = _ErrorContainer()
if check_context:
are_we_already_in_a_traceback = sys.exc_info()[0]
try:
yield ec
success = False
except except_cls as err:
ec.error = err
success = True
if msg is not None:
if text_exact:
assert str(err) == msg, f"{msg} != {err}"
else:
assert re.search(msg, str(err), re.UNICODE), f"{msg} !~ {err}"
if check_context and not are_we_already_in_a_traceback:
_assert_proper_exception_context(err)
print(str(err).encode("utf-8"))
# assert outside the block so it works for AssertionError too!
assert success, "Callable did not raise an exception"
def expect_raises(except_cls, check_context=True):
return _expect_raises(except_cls, check_context=check_context)
def expect_raises_message(
except_cls, msg, check_context=True, text_exact=False
):
return _expect_raises(
except_cls, msg=msg, check_context=check_context, text_exact=text_exact
)
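# Hedged usage sketch for the context-manager form:
#
#     with expect_raises_message(ValueError, "bad .* value"):
#         do_something_that_raises()
#
# The message is matched as a regular expression unless text_exact=True,
# and check_context additionally runs _assert_proper_exception_context().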
def eq_ignore_whitespace(a, b, msg=None):
a = re.sub(r"^\s+?|\n", "", a)
a = re.sub(r" {2,}", " ", a)
b = re.sub(r"^\s+?|\n", "", b)
b = re.sub(r" {2,}", " ", b)
assert a == b, msg or "%r != %r" % (a, b)
_dialect_mods: Dict[Any, Any] = {}
def _get_dialect(name):
if name is None or name == "default":
return default.DefaultDialect()
else:
d = URL.create(name).get_dialect()()
if name == "postgresql":
d.implicit_returning = True
elif name == "mssql":
d.legacy_schema_aliasing = False
return d
def expect_warnings(*messages, **kw):
"""Context manager which expects one or more warnings.
With no arguments, squelches all SAWarnings emitted via
sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise
pass string expressions that will match selected warnings via regex;
all non-matching warnings are sent through.
The expect version **asserts** that the warnings were in fact seen.
Note that the test suite sets SAWarning warnings to raise exceptions.
"""
return _expect_warnings(Warning, messages, **kw)
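# Hedged usage sketch:
#
#     with expect_warnings("Skipped unsupported reflection"):
#         run_something_that_warns()
#
# Each positional argument is a regex; matching warnings are squelched
# and asserted to have occurred, while non-matching warnings propagate.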
def emits_python_deprecation_warning(*messages):
"""Decorator form of expect_warnings().
Note that, unlike expect_warnings(), this decorator does **not**
assert that the warnings were in fact seen.
"""
@decorator
def decorate(fn, *args, **kw):
with _expect_warnings(DeprecationWarning, assert_=False, *messages):
return fn(*args, **kw)
return decorate
def expect_sqlalchemy_deprecated(*messages, **kw):
return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
def expect_sqlalchemy_deprecated_20(*messages, **kw):
return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw)
|
import importlib.machinery
import os
from pathlib import Path
import shutil
import textwrap
from sqlalchemy.testing import config
from sqlalchemy.testing import provision
from . import util as testing_util
from .. import command
from .. import script
from .. import util
from ..script import Script
from ..script import ScriptDirectory
def _get_staging_directory():
if provision.FOLLOWER_IDENT:
return f"scratch_{provision.FOLLOWER_IDENT}"
else:
return "scratch"
def staging_env(create=True, template="generic", sourceless=False):
cfg = _testing_config()
if create:
path = _join_path(_get_staging_directory(), "scripts")
assert not os.path.exists(path), (
"staging directory %s already exists; poor cleanup?" % path
)
command.init(cfg, path, template=template)
if sourceless:
try:
# do an import so that a .pyc/.pyo is generated.
util.load_python_file(path, "env.py")
except AttributeError:
# we don't have the migration context set up yet,
# so running env.py throws this exception.
# theoretically we could use py_compile here to
# generate the .pyc/.pyo without importing, but it's
# not really worth it.
pass
assert sourceless in (
"pep3147_envonly",
"simple",
"pep3147_everything",
), sourceless
make_sourceless(
_join_path(path, "env.py"),
"pep3147" if "pep3147" in sourceless else "simple",
)
sc = script.ScriptDirectory.from_config(cfg)
return sc
def clear_staging_env():
from sqlalchemy.testing import engines
engines.testing_reaper.close_all()
shutil.rmtree(_get_staging_directory(), True)
def script_file_fixture(txt):
dir_ = _join_path(_get_staging_directory(), "scripts")
path = _join_path(dir_, "script.py.mako")
with open(path, "w") as f:
f.write(txt)
def env_file_fixture(txt):
dir_ = _join_path(_get_staging_directory(), "scripts")
txt = (
"""
from alembic import context
config = context.config
"""
+ txt
)
path = _join_path(dir_, "env.py")
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
with open(path, "w") as f:
f.write(txt)
def _sqlite_file_db(tempname="foo.db", future=False, scope=None, **options):
dir_ = _join_path(_get_staging_directory(), "scripts")
url = "sqlite:///%s/%s" % (dir_, tempname)
if scope:
options["scope"] = scope
return testing_util.testing_engine(url=url, future=future, options=options)
def _sqlite_testing_config(sourceless=False, future=False):
dir_ = _join_path(_get_staging_directory(), "scripts")
url = f"sqlite:///{dir_}/foo.db"
sqlalchemy_future = future or ("future" in config.db.__class__.__module__)
return _write_config_file(
f"""
[alembic]
script_location = {dir_}
sqlalchemy.url = {url}
sourceless = {"true" if sourceless else "false"}
{"sqlalchemy.future = true" if sqlalchemy_future else ""}
[loggers]
keys = root,sqlalchemy
[handlers]
keys = console
[logger_root]
level = WARNING
handlers = console
qualname =
[logger_sqlalchemy]
level = DEBUG
handlers =
qualname = sqlalchemy.engine
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
)
def _multi_dir_testing_config(sourceless=False, extra_version_location=""):
dir_ = _join_path(_get_staging_directory(), "scripts")
sqlalchemy_future = "future" in config.db.__class__.__module__
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file(
f"""
[alembic]
script_location = {dir_}
sqlalchemy.url = {url}
sqlalchemy.future = {"true" if sqlalchemy_future else "false"}
sourceless = {"true" if sourceless else "false"}
version_locations = %(here)s/model1/ %(here)s/model2/ %(here)s/model3/ \
{extra_version_location}
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARNING
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
)
def _no_sql_testing_config(dialect="postgresql", directives=""):
"""use a postgresql url with no host so that
connections guaranteed to fail"""
dir_ = _join_path(_get_staging_directory(), "scripts")
return _write_config_file(
f"""
[alembic]
script_location = {dir_}
sqlalchemy.url = {dialect}://
{directives}
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARNING
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
)
def _write_config_file(text):
cfg = _testing_config()
with open(cfg.config_file_name, "w") as f:
f.write(text)
return cfg
def _testing_config():
from alembic.config import Config
if not os.access(_get_staging_directory(), os.F_OK):
os.mkdir(_get_staging_directory())
return Config(_join_path(_get_staging_directory(), "test_alembic.ini"))
def write_script(
scriptdir, rev_id, content, encoding="ascii", sourceless=False
):
old = scriptdir.revision_map.get_revision(rev_id)
path = old.path
content = textwrap.dedent(content)
if encoding:
content = content.encode(encoding)
with open(path, "wb") as fp:
fp.write(content)
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
script = Script._from_path(scriptdir, path)
old = scriptdir.revision_map.get_revision(script.revision)
if old.down_revision != script.down_revision:
raise Exception("Can't change down_revision on a refresh operation.")
scriptdir.revision_map.add_revision(script, _replace=True)
if sourceless:
make_sourceless(
path, "pep3147" if sourceless == "pep3147_everything" else "simple"
)
def make_sourceless(path, style):
import py_compile
py_compile.compile(path)
if style == "simple":
pyc_path = util.pyc_file_from_path(path)
suffix = importlib.machinery.BYTECODE_SUFFIXES[0]
filepath, ext = os.path.splitext(path)
simple_pyc_path = filepath + suffix
shutil.move(pyc_path, simple_pyc_path)
pyc_path = simple_pyc_path
else:
assert style == "pep3147", style
pyc_path = util.pyc_file_from_path(path)
assert os.access(pyc_path, os.F_OK)
os.unlink(path)
def three_rev_fixture(cfg):
a = util.rev_id()
b = util.rev_id()
c = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(a, "revision a", refresh=True, head="base")
write_script(
script,
a,
f"""\
"Rev A"
revision = '{a}'
down_revision = None
from alembic import op
def upgrade():
op.execute("CREATE STEP 1")
def downgrade():
op.execute("DROP STEP 1")
""",
)
script.generate_revision(b, "revision b", refresh=True, head=a)
write_script(
script,
b,
f"""# coding: utf-8
"Rev B, méil, %3"
revision = '{b}'
down_revision = '{a}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 2")
def downgrade():
op.execute("DROP STEP 2")
""",
encoding="utf-8",
)
script.generate_revision(c, "revision c", refresh=True, head=b)
write_script(
script,
c,
f"""\
"Rev C"
revision = '{c}'
down_revision = '{b}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 3")
def downgrade():
op.execute("DROP STEP 3")
""",
)
return a, b, c
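# Hedged note: the ids are returned in creation order, so a test can run
# e.g. command.upgrade(cfg, c) and expect "CREATE STEP 1" through
# "CREATE STEP 3" to have been emitted in sequence.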
def multi_heads_fixture(cfg, a, b, c):
"""Create a multiple head fixture from the three-revs fixture"""
# a -> b -> c
#      b -> d -> e
#      b -> f
d = util.rev_id()
e = util.rev_id()
f = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(
d, "revision d from b", head=b, splice=True, refresh=True
)
write_script(
script,
d,
f"""\
"Rev D"
revision = '{d}'
down_revision = '{b}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 4")
def downgrade():
op.execute("DROP STEP 4")
""",
)
script.generate_revision(
e, "revision e from d", head=d, splice=True, refresh=True
)
write_script(
script,
e,
f"""\
"Rev E"
revision = '{e}'
down_revision = '{d}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 5")
def downgrade():
op.execute("DROP STEP 5")
""",
)
script.generate_revision(
f, "revision f from b", head=b, splice=True, refresh=True
)
write_script(
script,
f,
f"""\
"Rev F"
revision = '{f}'
down_revision = '{b}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 6")
def downgrade():
op.execute("DROP STEP 6")
""",
)
return d, e, f
def _multidb_testing_config(engines):
"""alembic.ini fixture to work exactly with the 'multidb' template"""
dir_ = _join_path(_get_staging_directory(), "scripts")
sqlalchemy_future = "future" in config.db.__class__.__module__
databases = ", ".join(engines.keys())
engines = "\n\n".join(
f"[{key}]\nsqlalchemy.url = {value.url}"
for key, value in engines.items()
)
return _write_config_file(
f"""
[alembic]
script_location = {dir_}
sourceless = false
sqlalchemy.future = {"true" if sqlalchemy_future else "false"}
databases = {databases}
{engines}
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARNING
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
)
def _join_path(base: str, *more: str):
return str(Path(base).joinpath(*more).as_posix())
|
from __future__ import annotations
import configparser
from contextlib import contextmanager
import io
import re
from typing import Any
from typing import Dict
from sqlalchemy import Column
from sqlalchemy import create_mock_engine
from sqlalchemy import inspect
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.testing import config
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import eq_
from sqlalchemy.testing.fixtures import FutureEngineMixin
from sqlalchemy.testing.fixtures import TablesTest as SQLAlchemyTablesTest
from sqlalchemy.testing.fixtures import TestBase as SQLAlchemyTestBase
import alembic
from .assertions import _get_dialect
from ..environment import EnvironmentContext
from ..migration import MigrationContext
from ..operations import Operations
from ..util import sqla_compat
from ..util.sqla_compat import sqla_2
testing_config = configparser.ConfigParser()
testing_config.read(["test.cfg"])
class TestBase(SQLAlchemyTestBase):
is_sqlalchemy_future = sqla_2
@testing.fixture()
def ops_context(self, migration_context):
with migration_context.begin_transaction(_per_migration=True):
yield Operations(migration_context)
@testing.fixture
def migration_context(self, connection):
return MigrationContext.configure(
connection, opts=dict(transaction_per_migration=True)
)
@testing.fixture
def as_sql_migration_context(self, connection):
return MigrationContext.configure(
connection, opts=dict(transaction_per_migration=True, as_sql=True)
)
@testing.fixture
def connection(self):
with config.db.connect() as conn:
yield conn
class TablesTest(TestBase, SQLAlchemyTablesTest):
pass
FutureEngineMixin.is_sqlalchemy_future = True
def capture_db(dialect="postgresql://"):
buf = []
def dump(sql, *multiparams, **params):
buf.append(str(sql.compile(dialect=engine.dialect)))
engine = create_mock_engine(dialect, dump)
return engine, buf
_engs: Dict[Any, Any] = {}
@contextmanager
def capture_context_buffer(**kw):
if kw.pop("bytes_io", False):
buf = io.BytesIO()
else:
buf = io.StringIO()
kw.update({"dialect_name": "sqlite", "output_buffer": buf})
conf = EnvironmentContext.configure
def configure(*arg, **opt):
opt.update(**kw)
return conf(*arg, **opt)
with mock.patch.object(EnvironmentContext, "configure", configure):
yield buf
@contextmanager
def capture_engine_context_buffer(**kw):
from .env import _sqlite_file_db
from sqlalchemy import event
buf = io.StringIO()
eng = _sqlite_file_db()
conn = eng.connect()
@event.listens_for(conn, "before_cursor_execute")
def bce(conn, cursor, statement, parameters, context, executemany):
buf.write(statement + "\n")
kw.update({"connection": conn})
conf = EnvironmentContext.configure
def configure(*arg, **opt):
opt.update(**kw)
return conf(*arg, **opt)
with mock.patch.object(EnvironmentContext, "configure", configure):
yield buf
def op_fixture(
dialect="default",
as_sql=False,
naming_convention=None,
literal_binds=False,
native_boolean=None,
):
opts = {}
if naming_convention:
opts["target_metadata"] = MetaData(naming_convention=naming_convention)
class buffer_:
def __init__(self):
self.lines = []
def write(self, msg):
msg = msg.strip()
msg = re.sub(r"[\n\t]", "", msg)
if as_sql:
# the impl produces soft tabs,
# so search for blocks of 4 spaces
msg = re.sub(r" ", "", msg)
msg = re.sub(r"\;\n*$", "", msg)
self.lines.append(msg)
def flush(self):
pass
buf = buffer_()
class ctx(MigrationContext):
def get_buf(self):
return buf
def clear_assertions(self):
buf.lines[:] = []
def assert_(self, *sql):
# TODO: make this more flexible about
# whitespace and such
eq_(buf.lines, [re.sub(r"[\n\t]", "", s) for s in sql])
def assert_contains(self, sql):
for stmt in buf.lines:
if re.sub(r"[\n\t]", "", sql) in stmt:
return
else:
assert False, "Could not locate fragment %r in %r" % (
sql,
buf.lines,
)
if as_sql:
opts["as_sql"] = as_sql
if literal_binds:
opts["literal_binds"] = literal_binds
ctx_dialect = _get_dialect(dialect)
if native_boolean is not None:
ctx_dialect.supports_native_boolean = native_boolean
# this is new as of SQLAlchemy 1.2.7 and is used by SQL Server,
# which breaks assumptions in the alembic test suite
ctx_dialect.non_native_boolean_check_constraint = True
if not as_sql:
def execute(stmt, *multiparam, **param):
if isinstance(stmt, str):
stmt = text(stmt)
assert stmt.supports_execution
sql = str(stmt.compile(dialect=ctx_dialect))
buf.write(sql)
connection = mock.Mock(dialect=ctx_dialect, execute=execute)
else:
opts["output_buffer"] = buf
connection = None
context = ctx(ctx_dialect, connection, opts)
alembic.op._proxy = Operations(context)
return context
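# Hedged usage sketch: a typical test drives the fixture through the
# global op proxy set up above:
#
#     context = op_fixture("postgresql")
#     op.drop_column("t1", "c1")
#     context.assert_("ALTER TABLE t1 DROP COLUMN c1")
#
# With as_sql=True the rendered statements land in the captured buffer
# via output_buffer instead of the mocked connection's execute().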
class AlterColRoundTripFixture:
# since these tests are about syntax, use a more recent SQLAlchemy;
# some of the type / server default compare logic might not work on
# older SQLAlchemy versions, as seems to be the case for SQLAlchemy
# 1.1 on Oracle
__requires__ = ("alter_column",)
def setUp(self):
self.conn = config.db.connect()
self.ctx = MigrationContext.configure(self.conn)
self.op = Operations(self.ctx)
self.metadata = MetaData()
def _compare_type(self, t1, t2):
c1 = Column("q", t1)
c2 = Column("q", t2)
assert not self.ctx.impl.compare_type(
c1, c2
), "Type objects %r and %r didn't compare as equivalent" % (t1, t2)
def _compare_server_default(self, t1, s1, t2, s2):
c1 = Column("q", t1, server_default=s1)
c2 = Column("q", t2, server_default=s2)
assert not self.ctx.impl.compare_server_default(
c1, c2, s2, s1
), "server defaults %r and %r didn't compare as equivalent" % (s1, s2)
def tearDown(self):
sqla_compat._safe_rollback_connection_transaction(self.conn)
with self.conn.begin():
self.metadata.drop_all(self.conn)
self.conn.close()
def _run_alter_col(self, from_, to_, compare=None):
column = Column(
from_.get("name", "colname"),
from_.get("type", String(10)),
nullable=from_.get("nullable", True),
server_default=from_.get("server_default", None),
# comment=from_.get("comment", None)
)
t = Table("x", self.metadata, column)
with sqla_compat._ensure_scope_for_ddl(self.conn):
t.create(self.conn)
insp = inspect(self.conn)
old_col = insp.get_columns("x")[0]
# TODO: conditional comment support
self.op.alter_column(
"x",
column.name,
existing_type=column.type,
existing_server_default=(
column.server_default
if column.server_default is not None
else False
),
existing_nullable=True if column.nullable else False,
# existing_comment=column.comment,
nullable=to_.get("nullable", None),
# modify_comment=False,
server_default=to_.get("server_default", False),
new_column_name=to_.get("name", None),
type_=to_.get("type", None),
)
insp = inspect(self.conn)
new_col = insp.get_columns("x")[0]
if compare is None:
compare = to_
eq_(
new_col["name"],
compare["name"] if "name" in compare else column.name,
)
self._compare_type(
new_col["type"], compare.get("type", old_col["type"])
)
eq_(new_col["nullable"], compare.get("nullable", column.nullable))
self._compare_server_default(
new_col["type"],
new_col.get("default", None),
compare.get("type", old_col["type"]),
(
compare["server_default"].text
if "server_default" in compare
else (
column.server_default.arg.text
if column.server_default is not None
else None
)
),
)
|
from sqlalchemy.testing.requirements import Requirements
from alembic import util
from ..testing import exclusions
class SuiteRequirements(Requirements):
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.open()
@property
def autocommit_isolation(self):
"""target database should support 'AUTOCOMMIT' isolation level"""
return exclusions.closed()
@property
def materialized_views(self):
"""needed for sqlalchemy compat"""
return exclusions.closed()
@property
def unique_constraint_reflection(self):
def doesnt_have_check_uq_constraints(config):
from sqlalchemy import inspect
insp = inspect(config.db)
try:
insp.get_unique_constraints("x")
except NotImplementedError:
return True
except TypeError:
return True
except Exception:
pass
return False
return exclusions.skip_if(doesnt_have_check_uq_constraints)
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_sequences],
"no sequence support",
)
@property
def foreign_key_match(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def check_constraints_w_enforcement(self):
"""Target database must support check constraints
and also enforce them."""
return exclusions.open()
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def reflects_fk_options(self):
return exclusions.closed()
@property
def sqlalchemy_1x(self):
return exclusions.skip_if(
lambda config: util.sqla_2,
"SQLAlchemy 1.x test",
)
@property
def sqlalchemy_2(self):
return exclusions.skip_if(
lambda config: not util.sqla_2,
"SQLAlchemy 2.x test",
)
@property
def asyncio(self):
def go(config):
try:
import greenlet # noqa: F401
except ImportError:
return False
else:
return True
return exclusions.only_if(go)
@property
def comments(self):
return exclusions.only_if(
lambda config: config.db.dialect.supports_comments
)
@property
def alter_column(self):
return exclusions.open()
@property
def computed_columns(self):
return exclusions.closed()
@property
def autoincrement_on_composite_pk(self):
return exclusions.closed()
@property
def fk_ondelete_is_reflected(self):
return exclusions.closed()
@property
def fk_onupdate_is_reflected(self):
return exclusions.closed()
@property
def fk_onupdate(self):
return exclusions.open()
@property
def fk_ondelete_restrict(self):
return exclusions.open()
@property
def fk_onupdate_restrict(self):
return exclusions.open()
@property
def fk_ondelete_noaction(self):
return exclusions.open()
@property
def fk_initially(self):
return exclusions.closed()
@property
def fk_deferrable(self):
return exclusions.closed()
@property
def fk_deferrable_is_reflected(self):
return exclusions.closed()
@property
def fk_names(self):
return exclusions.open()
@property
def integer_subtype_comparisons(self):
return exclusions.open()
@property
def no_name_normalize(self):
return exclusions.skip_if(
lambda config: config.db.dialect.requires_name_normalize
)
@property
def identity_columns(self):
return exclusions.closed()
@property
def identity_columns_alter(self):
return exclusions.closed()
|
from itertools import zip_longest
from sqlalchemy import schema
from sqlalchemy.sql.elements import ClauseList
class CompareTable:
def __init__(self, table):
self.table = table
def __eq__(self, other):
if self.table.name != other.name or self.table.schema != other.schema:
return False
for c1, c2 in zip_longest(self.table.c, other.c):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
# TODO: compare constraints, indexes
def __ne__(self, other):
return not self.__eq__(other)
class CompareColumn:
def __init__(self, column):
self.column = column
def __eq__(self, other):
return (
self.column.name == other.name
and self.column.nullable == other.nullable
)
# TODO: datatypes etc
def __ne__(self, other):
return not self.__eq__(other)
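# Hedged usage sketch: these wrappers put the comparison logic on the
# expected side of an equality, e.g.
#
#     eq_(CompareTable(expected_table), reflected_table)
#
# so tests can assert structural equivalence without Table or Column
# having to define __eq__ themselves.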
class CompareIndex:
def __init__(self, index, name_only=False):
self.index = index
self.name_only = name_only
def __eq__(self, other):
if self.name_only:
return self.index.name == other.name
else:
return (
str(schema.CreateIndex(self.index))
== str(schema.CreateIndex(other))
and self.index.dialect_kwargs == other.dialect_kwargs
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
expr = ClauseList(*self.index.expressions)
try:
expr_str = expr.compile().string
except Exception:
expr_str = str(expr)
return f"<CompareIndex {self.index.name}({expr_str})>"
class CompareCheckConstraint:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
return (
isinstance(other, schema.CheckConstraint)
and self.constraint.name == other.name
and (str(self.constraint.sqltext) == str(other.sqltext))
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
def __ne__(self, other):
return not self.__eq__(other)
class CompareForeignKey:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
r1 = (
isinstance(other, schema.ForeignKeyConstraint)
and self.constraint.name == other.name
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
if not r1:
return False
for c1, c2 in zip_longest(self.constraint.columns, other.columns):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class ComparePrimaryKey:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
r1 = (
isinstance(other, schema.PrimaryKeyConstraint)
and self.constraint.name == other.name
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
if not r1:
return False
for c1, c2 in zip_longest(self.constraint.columns, other.columns):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class CompareUniqueConstraint:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
r1 = (
isinstance(other, schema.UniqueConstraint)
and self.constraint.name == other.name
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
if not r1:
return False
for c1, c2 in zip_longest(self.constraint.columns, other.columns):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
|
# testing/util.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import annotations
import types
from typing import Union
from sqlalchemy.util import inspect_getfullargspec
from ..util import sqla_2
def flag_combinations(*combinations):
"""A facade around @testing.combinations() oriented towards boolean
keyword-based arguments.
Basically generates a nice looking identifier based on the keywords
and also sets up the argument names.
E.g.::
@testing.flag_combinations(
dict(lazy=False, passive=False),
dict(lazy=True, passive=False),
dict(lazy=False, passive=True),
dict(lazy=False, passive=True, raiseload=True),
)
would result in::
@testing.combinations(
('', False, False, False),
('lazy', True, False, False),
('passive', False, True, False),
('passive_raiseload', False, True, True),
id_='iaaa',
argnames='lazy,passive,raiseload'
)
"""
from sqlalchemy.testing import config
keys = set()
for d in combinations:
keys.update(d)
keys = sorted(keys)
return config.combinations(
*[
("_".join(k for k in keys if d.get(k, False)),)
+ tuple(d.get(k, False) for k in keys)
for d in combinations
],
id_="i" + ("a" * len(keys)),
argnames=",".join(keys),
)
def resolve_lambda(__fn, **kw):
"""Given a no-arg lambda and a namespace, return a new lambda that
has all the values filled in.
This is used so that we can have module-level fixtures that
refer to instance-level variables using lambdas.
"""
pos_args = inspect_getfullargspec(__fn)[0]
pass_pos_args = {arg: kw.pop(arg) for arg in pos_args}
glb = dict(__fn.__globals__)
glb.update(kw)
new_fn = types.FunctionType(__fn.__code__, glb)
return new_fn(**pass_pos_args)
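# Hedged usage sketch: a module-level lambda referring to test-local
# names is resolved at call time, e.g.
#
#     case = lambda self: (sa.Computed("5"), None)
#     arg_before, arg_after = resolve_lambda(case, self=test_instance)
#
# Positional argument names are filled from the keyword namespace; any
# remaining keywords become globals visible to the lambda body.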
def metadata_fixture(ddl="function"):
"""Provide MetaData for a pytest fixture."""
from sqlalchemy.testing import config
from . import fixture_functions
def decorate(fn):
def run_ddl(self):
from sqlalchemy import schema
metadata = self.metadata = schema.MetaData()
try:
result = fn(self, metadata)
metadata.create_all(config.db)
# TODO:
# somehow get a per-function dml erase fixture here
yield result
finally:
metadata.drop_all(config.db)
return fixture_functions.fixture(scope=ddl)(run_ddl)
return decorate
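# Hedged usage sketch:
#
#     @metadata_fixture()
#     def tables(self, metadata):
#         return Table("x", metadata, Column("id", Integer, primary_key=True))
#
# The decorated function receives a fresh MetaData; create_all() runs
# before the fixture yields and drop_all() runs during teardown.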
def _safe_int(value: str) -> Union[int, str]:
try:
return int(value)
except ValueError:
return value
def testing_engine(url=None, options=None, future=False):
from sqlalchemy.testing import config
from sqlalchemy.testing.engines import testing_engine
if not future:
future = getattr(config._current.options, "future_engine", False)
if not sqla_2:
kw = {"future": future} if future else {}
else:
kw = {}
return testing_engine(url, options, **kw)
|
# testing/warnings.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import warnings
from sqlalchemy import exc as sa_exc
def setup_filters():
"""Set global warning behavior for the test suite."""
warnings.resetwarnings()
warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning)
warnings.filterwarnings("error", category=sa_exc.SAWarning)
# some selected deprecations...
warnings.filterwarnings("error", category=DeprecationWarning)
try:
import pytest
except ImportError:
pass
else:
warnings.filterwarnings(
"once", category=pytest.PytestDeprecationWarning
)
|
from sqlalchemy.testing import config
from sqlalchemy.testing import emits_warning
from sqlalchemy.testing import engines
from sqlalchemy.testing import exclusions
from sqlalchemy.testing import mock
from sqlalchemy.testing import provide_metadata
from sqlalchemy.testing import skip_if
from sqlalchemy.testing import uses_deprecated
from sqlalchemy.testing.config import combinations
from sqlalchemy.testing.config import fixture
from sqlalchemy.testing.config import requirements as requires
from .assertions import assert_raises
from .assertions import assert_raises_message
from .assertions import emits_python_deprecation_warning
from .assertions import eq_
from .assertions import eq_ignore_whitespace
from .assertions import expect_raises
from .assertions import expect_raises_message
from .assertions import expect_sqlalchemy_deprecated
from .assertions import expect_sqlalchemy_deprecated_20
from .assertions import expect_warnings
from .assertions import is_
from .assertions import is_false
from .assertions import is_not_
from .assertions import is_true
from .assertions import ne_
from .fixtures import TestBase
from .util import resolve_lambda
|
"""
Bootstrapper for test framework plugins.
"""
|
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from ._autogen_fixtures import AutogenFixtureTest
from ...testing import eq_
from ...testing import mock
from ...testing import TestBase
class AutogenerateCommentsTest(AutogenFixtureTest, TestBase):
__backend__ = True
__requires__ = ("comments",)
def test_existing_table_comment_no_change(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
comment="this is some table",
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
comment="this is some table",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_add_table_comment(self):
m1 = MetaData()
m2 = MetaData()
Table("some_table", m1, Column("test", String(10), primary_key=True))
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
comment="this is some table",
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_table_comment")
eq_(diffs[0][1].comment, "this is some table")
eq_(diffs[0][2], None)
def test_remove_table_comment(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
comment="this is some table",
)
Table("some_table", m2, Column("test", String(10), primary_key=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_table_comment")
eq_(diffs[0][1].comment, None)
def test_alter_table_comment(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
comment="this is some table",
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
comment="this is also some table",
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_table_comment")
eq_(diffs[0][1].comment, "this is also some table")
eq_(diffs[0][2], "this is some table")
def test_existing_column_comment_no_change(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
Column("amount", Float, comment="the amount"),
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
Column("amount", Float, comment="the amount"),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_add_column_comment(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
Column("amount", Float),
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
Column("amount", Float, comment="the amount"),
)
diffs = self._fixture(m1, m2)
eq_(
diffs,
[
[
(
"modify_comment",
None,
"some_table",
"amount",
{
"existing_nullable": True,
"existing_type": mock.ANY,
"existing_server_default": False,
},
None,
"the amount",
)
]
],
)
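# Hedged note: this nested tuple is the "modify_<attr>" diff form used
# by autogenerate, roughly (op, schema, table_name, column_name,
# kwargs, old_value, new_value); here the old comment is None and the
# new one is "the amount".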
def test_remove_column_comment(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
Column("amount", Float, comment="the amount"),
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
Column("amount", Float),
)
diffs = self._fixture(m1, m2)
eq_(
diffs,
[
[
(
"modify_comment",
None,
"some_table",
"amount",
{
"existing_nullable": True,
"existing_type": mock.ANY,
"existing_server_default": False,
},
"the amount",
None,
)
]
],
)
def test_alter_column_comment(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
Column("amount", Float, comment="the amount"),
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
Column("amount", Float, comment="the adjusted amount"),
)
diffs = self._fixture(m1, m2)
eq_(
diffs,
[
[
(
"modify_comment",
None,
"some_table",
"amount",
{
"existing_nullable": True,
"existing_type": mock.ANY,
"existing_server_default": False,
},
"the amount",
"the adjusted amount",
)
]
],
)
|
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from ._autogen_fixtures import AutogenFixtureTest
from ... import testing
from ...testing import eq_
from ...testing import is_
from ...testing import is_true
from ...testing import mock
from ...testing import TestBase
class AutogenerateComputedTest(AutogenFixtureTest, TestBase):
__requires__ = ("computed_columns",)
__backend__ = True
def test_add_computed_column(self):
m1 = MetaData()
m2 = MetaData()
Table("user", m1, Column("id", Integer, primary_key=True))
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("foo", Integer, sa.Computed("5")),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_column")
eq_(diffs[0][2], "user")
eq_(diffs[0][3].name, "foo")
c = diffs[0][3].computed
is_true(isinstance(c, sa.Computed))
is_(c.persisted, None)
eq_(str(c.sqltext), "5")
def test_remove_computed_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("foo", Integer, sa.Computed("5")),
)
Table("user", m2, Column("id", Integer, primary_key=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_column")
eq_(diffs[0][2], "user")
c = diffs[0][3]
eq_(c.name, "foo")
is_true(isinstance(c.computed, sa.Computed))
is_true(isinstance(c.server_default, sa.Computed))
@testing.combinations(
lambda: (None, sa.Computed("bar*5")),
(lambda: (sa.Computed("bar*5"), None)),
lambda: (
sa.Computed("bar*5"),
sa.Computed("bar * 42", persisted=True),
),
lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")),
)
def test_cant_change_computed_warning(self, test_case):
arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
m1 = MetaData()
m2 = MetaData()
arg_before = [] if arg_before is None else [arg_before]
arg_after = [] if arg_after is None else [arg_after]
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, *arg_before),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, *arg_after),
)
with mock.patch("alembic.util.warn") as mock_warn:
diffs = self._fixture(m1, m2)
eq_(
mock_warn.mock_calls,
[mock.call("Computed default on user.foo cannot be modified")],
)
eq_(list(diffs), [])
@testing.combinations(
lambda: (None, None),
lambda: (sa.Computed("5"), sa.Computed("5")),
lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")),
lambda: (sa.Computed("bar*5"), sa.Computed("bar * \r\n\t5")),
)
def test_computed_unchanged(self, test_case):
arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
m1 = MetaData()
m2 = MetaData()
arg_before = [] if arg_before is None else [arg_before]
arg_after = [] if arg_after is None else [arg_after]
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, *arg_before),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("foo", Integer, *arg_after),
)
with mock.patch("alembic.util.warn") as mock_warn:
diffs = self._fixture(m1, m2)
eq_(mock_warn.mock_calls, [])
eq_(list(diffs), [])
|
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.testing import in_
from ._autogen_fixtures import AutogenFixtureTest
from ... import testing
from ...testing import config
from ...testing import eq_
from ...testing import is_
from ...testing import TestBase
class AlterColumnTest(AutogenFixtureTest, TestBase):
__backend__ = True
@testing.combinations((True,), (False,))
@config.requirements.comments
def test_all_existings_filled(self, pk):
m1 = MetaData()
m2 = MetaData()
Table("a", m1, Column("x", Integer, primary_key=pk))
Table("a", m2, Column("x", Integer, comment="x", primary_key=pk))
alter_col = self._assert_alter_col(m1, m2, pk)
eq_(alter_col.modify_comment, "x")
@testing.combinations((True,), (False,))
@config.requirements.comments
def test_all_existings_filled_in_notnull(self, pk):
m1 = MetaData()
m2 = MetaData()
Table("a", m1, Column("x", Integer, nullable=False, primary_key=pk))
Table(
"a",
m2,
Column("x", Integer, nullable=False, comment="x", primary_key=pk),
)
self._assert_alter_col(m1, m2, pk, nullable=False)
@testing.combinations((True,), (False,))
@config.requirements.comments
def test_all_existings_filled_in_comment(self, pk):
m1 = MetaData()
m2 = MetaData()
Table("a", m1, Column("x", Integer, comment="old", primary_key=pk))
Table("a", m2, Column("x", Integer, comment="new", primary_key=pk))
alter_col = self._assert_alter_col(m1, m2, pk)
eq_(alter_col.existing_comment, "old")
@testing.combinations((True,), (False,))
@config.requirements.comments
def test_all_existings_filled_in_server_default(self, pk):
m1 = MetaData()
m2 = MetaData()
Table(
"a", m1, Column("x", Integer, server_default="5", primary_key=pk)
)
Table(
"a",
m2,
Column(
"x", Integer, server_default="5", comment="new", primary_key=pk
),
)
alter_col = self._assert_alter_col(m1, m2, pk)
in_("5", alter_col.existing_server_default.arg.text)
def _assert_alter_col(self, m1, m2, pk, nullable=None):
ops = self._fixture(m1, m2, return_ops=True)
modify_table = ops.ops[-1]
alter_col = modify_table.ops[0]
if nullable is None:
eq_(alter_col.existing_nullable, not pk)
else:
eq_(alter_col.existing_nullable, nullable)
assert alter_col.existing_type._compare_type_affinity(Integer())
return alter_col
class AutoincrementTest(AutogenFixtureTest, TestBase):
__backend__ = True
__requires__ = ("integer_subtype_comparisons",)
def test_alter_column_autoincrement_none(self):
m1 = MetaData()
m2 = MetaData()
Table("a", m1, Column("x", Integer, nullable=False))
Table("a", m2, Column("x", Integer, nullable=True))
ops = self._fixture(m1, m2, return_ops=True)
assert "autoincrement" not in ops.ops[0].ops[0].kw
def test_alter_column_autoincrement_pk_false(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("x", Integer, primary_key=True, autoincrement=False),
)
Table(
"a",
m2,
Column("x", BigInteger, primary_key=True, autoincrement=False),
)
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], False)
def test_alter_column_autoincrement_pk_implicit_true(self):
m1 = MetaData()
m2 = MetaData()
Table("a", m1, Column("x", Integer, primary_key=True))
Table("a", m2, Column("x", BigInteger, primary_key=True))
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], True)
def test_alter_column_autoincrement_pk_explicit_true(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a", m1, Column("x", Integer, primary_key=True, autoincrement=True)
)
Table(
"a",
m2,
Column("x", BigInteger, primary_key=True, autoincrement=True),
)
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], True)
def test_alter_column_autoincrement_nonpk_false(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, autoincrement=False),
)
Table(
"a",
m2,
Column("id", Integer, primary_key=True),
Column("x", BigInteger, autoincrement=False),
)
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], False)
def test_alter_column_autoincrement_nonpk_implicit_false(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
Table(
"a",
m2,
Column("id", Integer, primary_key=True),
Column("x", BigInteger),
)
ops = self._fixture(m1, m2, return_ops=True)
assert "autoincrement" not in ops.ops[0].ops[0].kw
def test_alter_column_autoincrement_nonpk_explicit_true(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer, autoincrement=True),
)
Table(
"a",
m2,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", BigInteger, autoincrement=True),
)
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], True)
def test_alter_column_autoincrement_compositepk_false(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, primary_key=True, autoincrement=False),
)
Table(
"a",
m2,
Column("id", Integer, primary_key=True),
Column("x", BigInteger, primary_key=True, autoincrement=False),
)
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], False)
def test_alter_column_autoincrement_compositepk_implicit_false(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, primary_key=True),
)
Table(
"a",
m2,
Column("id", Integer, primary_key=True),
Column("x", BigInteger, primary_key=True),
)
ops = self._fixture(m1, m2, return_ops=True)
assert "autoincrement" not in ops.ops[0].ops[0].kw
@config.requirements.autoincrement_on_composite_pk
def test_alter_column_autoincrement_compositepk_explicit_true(self):
m1 = MetaData()
m2 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer, primary_key=True, autoincrement=True),
# on SQLA 1.0 and earlier, this being present
# trips the "add KEY for the primary key" logic so that the
# AUTO_INCREMENT keyword is accepted by MySQL; on SQLA 1.1 and
# greater, the columns are simply reorganized.
mysql_engine="InnoDB",
)
Table(
"a",
m2,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", BigInteger, primary_key=True, autoincrement=True),
)
ops = self._fixture(m1, m2, return_ops=True)
is_(ops.ops[0].ops[0].kw["autoincrement"], True)
|
from sqlalchemy import Column
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from ._autogen_fixtures import AutogenFixtureTest
from ...testing import combinations
from ...testing import config
from ...testing import eq_
from ...testing import mock
from ...testing import TestBase
class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase):
__backend__ = True
__requires__ = ("foreign_key_constraint_reflection",)
def test_remove_fk(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", String(10)),
ForeignKeyConstraint(["test2"], ["some_table.test"]),
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", String(10)),
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["test2"],
"some_table",
["test"],
conditional_name="servergenerated",
)
def test_add_fk(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id", Integer, primary_key=True),
Column("test", String(10)),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", String(10)),
)
Table(
"some_table",
m2,
Column("id", Integer, primary_key=True),
Column("test", String(10)),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", String(10)),
ForeignKeyConstraint(["test2"], ["some_table.test"]),
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0], "add_fk", "user", ["test2"], "some_table", ["test"]
)
def test_no_change(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id", Integer, primary_key=True),
Column("test", String(10)),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", Integer),
ForeignKeyConstraint(["test2"], ["some_table.id"]),
)
Table(
"some_table",
m2,
Column("id", Integer, primary_key=True),
Column("test", String(10)),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", Integer),
ForeignKeyConstraint(["test2"], ["some_table.id"]),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_no_change_composite_fk(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["some_table.id_1", "some_table.id_2"],
),
)
Table(
"some_table",
m2,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["some_table.id_1", "some_table.id_2"],
),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_casing_convention_changed_so_put_drops_first(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("test", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", String(10)),
ForeignKeyConstraint(["test2"], ["some_table.test"], name="MyFK"),
)
Table(
"some_table",
m2,
Column("test", String(10), primary_key=True),
)
# foreign key autogen currently does not take "name" into account,
# so change the definition just for the purposes of testing the
# add/drop order for now.
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("test2", String(10)),
ForeignKeyConstraint(["a1"], ["some_table.test"], name="myfk"),
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["test2"],
"some_table",
["test"],
name="MyFK" if config.requirements.fk_names.enabled else None,
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["a1"],
"some_table",
["test"],
name="myfk",
)
def test_add_composite_fk_with_name(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
)
Table(
"some_table",
m2,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["some_table.id_1", "some_table.id_2"],
name="fk_test_name",
),
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0],
"add_fk",
"user",
["other_id_1", "other_id_2"],
"some_table",
["id_1", "id_2"],
name="fk_test_name",
)
@config.requirements.no_name_normalize
def test_remove_composite_fk(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["some_table.id_1", "some_table.id_2"],
name="fk_test_name",
),
)
Table(
"some_table",
m2,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["other_id_1", "other_id_2"],
"some_table",
["id_1", "id_2"],
conditional_name="fk_test_name",
)
def test_add_fk_colkeys(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
)
Table(
"some_table",
m2,
Column("id_1", String(10), key="tid1", primary_key=True),
Column("id_2", String(10), key="tid2", primary_key=True),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("other_id_1", String(10), key="oid1"),
Column("other_id_2", String(10), key="oid2"),
ForeignKeyConstraint(
["oid1", "oid2"],
["some_table.tid1", "some_table.tid2"],
name="fk_test_name",
),
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0],
"add_fk",
"user",
["other_id_1", "other_id_2"],
"some_table",
["id_1", "id_2"],
name="fk_test_name",
)
def test_no_change_colkeys(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id_1", String(10), primary_key=True),
Column("id_2", String(10), primary_key=True),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("other_id_1", String(10)),
Column("other_id_2", String(10)),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["some_table.id_1", "some_table.id_2"],
),
)
Table(
"some_table",
m2,
Column("id_1", String(10), key="tid1", primary_key=True),
Column("id_2", String(10), key="tid2", primary_key=True),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("other_id_1", String(10), key="oid1"),
Column("other_id_2", String(10), key="oid2"),
ForeignKeyConstraint(
["oid1", "oid2"], ["some_table.tid1", "some_table.tid2"]
),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
class IncludeHooksTest(AutogenFixtureTest, TestBase):
__backend__ = True
__requires__ = ("fk_names",)
@combinations(("object",), ("name",))
@config.requirements.no_name_normalize
def test_remove_connection_fk(self, hook_type):
m1 = MetaData()
m2 = MetaData()
ref = Table(
"ref",
m1,
Column("id", Integer, primary_key=True),
)
t1 = Table(
"t",
m1,
Column("x", Integer),
Column("y", Integer),
)
t1.append_constraint(
ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1")
)
t1.append_constraint(
ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2")
)
ref = Table(
"ref",
m2,
Column("id", Integer, primary_key=True),
)
Table(
"t",
m2,
Column("x", Integer),
Column("y", Integer),
)
if hook_type == "object":
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, ForeignKeyConstraint)
and type_ == "foreign_key_constraint"
and reflected
and name == "fk1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
elif hook_type == "name":
def include_name(name, type_, parent_names):
if name == "fk1":
if type_ == "index": # MariaDB thing
return True
eq_(type_, "foreign_key_constraint")
eq_(
parent_names,
{
"schema_name": None,
"table_name": "t",
"schema_qualified_table_name": "t",
},
)
return False
else:
return True
diffs = self._fixture(m1, m2, name_filters=include_name)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"t",
["y"],
"ref",
["id"],
conditional_name="fk2",
)
eq_(len(diffs), 1)
def test_add_metadata_fk(self):
m1 = MetaData()
m2 = MetaData()
Table(
"ref",
m1,
Column("id", Integer, primary_key=True),
)
Table(
"t",
m1,
Column("x", Integer),
Column("y", Integer),
)
ref = Table(
"ref",
m2,
Column("id", Integer, primary_key=True),
)
t2 = Table(
"t",
m2,
Column("x", Integer),
Column("y", Integer),
)
t2.append_constraint(
ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1")
)
t2.append_constraint(
ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2")
)
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, ForeignKeyConstraint)
and type_ == "foreign_key_constraint"
and not reflected
and name == "fk1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
self._assert_fk_diff(
diffs[0], "add_fk", "t", ["y"], "ref", ["id"], name="fk2"
)
eq_(len(diffs), 1)
@combinations(("object",), ("name",))
@config.requirements.no_name_normalize
def test_change_fk(self, hook_type):
m1 = MetaData()
m2 = MetaData()
r1a = Table(
"ref_a",
m1,
Column("a", Integer, primary_key=True),
)
Table(
"ref_b",
m1,
Column("a", Integer, primary_key=True),
Column("b", Integer, primary_key=True),
)
t1 = Table(
"t",
m1,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
t1.append_constraint(
ForeignKeyConstraint([t1.c.x], [r1a.c.a], name="fk1")
)
t1.append_constraint(
ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2")
)
Table(
"ref_a",
m2,
Column("a", Integer, primary_key=True),
)
r2b = Table(
"ref_b",
m2,
Column("a", Integer, primary_key=True),
Column("b", Integer, primary_key=True),
)
t2 = Table(
"t",
m2,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
t2.append_constraint(
ForeignKeyConstraint(
[t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1"
)
)
t2.append_constraint(
ForeignKeyConstraint(
[t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2"
)
)
if hook_type == "object":
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, ForeignKeyConstraint)
and type_ == "foreign_key_constraint"
and name == "fk1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
elif hook_type == "name":
def include_name(name, type_, parent_names):
if type_ == "index":
return True # MariaDB thing
if name == "fk1":
eq_(type_, "foreign_key_constraint")
eq_(
parent_names,
{
"schema_name": None,
"table_name": "t",
"schema_qualified_table_name": "t",
},
)
return False
else:
return True
diffs = self._fixture(m1, m2, name_filters=include_name)
if hook_type == "object":
self._assert_fk_diff(
diffs[0], "remove_fk", "t", ["y"], "ref_a", ["a"], name="fk2"
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"t",
["y", "z"],
"ref_b",
["a", "b"],
name="fk2",
)
eq_(len(diffs), 2)
elif hook_type == "name":
eq_(
{(d[0], d[1].name) for d in diffs},
{("add_fk", "fk2"), ("add_fk", "fk1"), ("remove_fk", "fk2")},
)
class AutogenerateFKOptionsTest(AutogenFixtureTest, TestBase):
__backend__ = True
def _fk_opts_fixture(self, old_opts, new_opts):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id", Integer, primary_key=True),
Column("test", String(10)),
)
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("tid", Integer),
ForeignKeyConstraint(["tid"], ["some_table.id"], **old_opts),
)
Table(
"some_table",
m2,
Column("id", Integer, primary_key=True),
Column("test", String(10)),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("tid", Integer),
ForeignKeyConstraint(["tid"], ["some_table.id"], **new_opts),
)
return self._fixture(m1, m2)
@config.requirements.fk_ondelete_is_reflected
def test_add_ondelete(self):
diffs = self._fk_opts_fixture({}, {"ondelete": "cascade"})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
ondelete=None,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
ondelete="cascade",
)
@config.requirements.fk_ondelete_is_reflected
def test_remove_ondelete(self):
diffs = self._fk_opts_fixture({"ondelete": "CASCADE"}, {})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
ondelete="CASCADE",
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
ondelete=None,
)
def test_nochange_ondelete(self):
"""test case sensitivity"""
diffs = self._fk_opts_fixture(
{"ondelete": "caSCAde"}, {"ondelete": "CasCade"}
)
eq_(diffs, [])
@config.requirements.fk_onupdate_is_reflected
def test_add_onupdate(self):
diffs = self._fk_opts_fixture({}, {"onupdate": "cascade"})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate=None,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate="cascade",
)
@config.requirements.fk_onupdate_is_reflected
def test_remove_onupdate(self):
diffs = self._fk_opts_fixture({"onupdate": "CASCADE"}, {})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate="CASCADE",
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate=None,
)
@config.requirements.fk_onupdate
def test_nochange_onupdate(self):
"""test case sensitivity"""
diffs = self._fk_opts_fixture(
{"onupdate": "caSCAde"}, {"onupdate": "CasCade"}
)
eq_(diffs, [])
@config.requirements.fk_ondelete_restrict
def test_nochange_ondelete_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
diffs = self._fk_opts_fixture(
{"ondelete": "restrict"}, {"ondelete": "restrict"}
)
eq_(diffs, [])
@config.requirements.fk_onupdate_restrict
def test_nochange_onupdate_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
diffs = self._fk_opts_fixture(
{"onupdate": "restrict"}, {"onupdate": "restrict"}
)
eq_(diffs, [])
@config.requirements.fk_ondelete_noaction
def test_nochange_ondelete_noaction(self):
"""test the NO ACTION option which generally comes back as None"""
diffs = self._fk_opts_fixture(
{"ondelete": "no action"}, {"ondelete": "no action"}
)
eq_(diffs, [])
@config.requirements.fk_onupdate
def test_nochange_onupdate_noaction(self):
"""test the NO ACTION option which generally comes back as None"""
diffs = self._fk_opts_fixture(
{"onupdate": "no action"}, {"onupdate": "no action"}
)
eq_(diffs, [])
@config.requirements.fk_ondelete_restrict
def test_change_ondelete_from_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
# note that this is impossible to detect if we change
# from RESTRICT to NO ACTION on MySQL.
diffs = self._fk_opts_fixture(
{"ondelete": "restrict"}, {"ondelete": "cascade"}
)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate=None,
ondelete=mock.ANY, # MySQL reports None, PG reports RESTRICT
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate=None,
ondelete="cascade",
)
@config.requirements.fk_ondelete_restrict
def test_change_onupdate_from_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
# note that this is impossible to detect if we change
# from RESTRICT to NO ACTION on MySQL.
diffs = self._fk_opts_fixture(
{"onupdate": "restrict"}, {"onupdate": "cascade"}
)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate=mock.ANY, # MySQL reports None, PG reports RESTRICT
ondelete=None,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate="cascade",
ondelete=None,
)
@config.requirements.fk_ondelete_is_reflected
@config.requirements.fk_onupdate_is_reflected
def test_ondelete_onupdate_combo(self):
diffs = self._fk_opts_fixture(
{"onupdate": "CASCADE", "ondelete": "SET NULL"},
{"onupdate": "RESTRICT", "ondelete": "RESTRICT"},
)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate="CASCADE",
ondelete="SET NULL",
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
onupdate="RESTRICT",
ondelete="RESTRICT",
)
@config.requirements.fk_initially
def test_add_initially_deferred(self):
diffs = self._fk_opts_fixture({}, {"initially": "deferred"})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
initially=None,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
initially="deferred",
)
@config.requirements.fk_initially
def test_remove_initially_deferred(self):
diffs = self._fk_opts_fixture({"initially": "deferred"}, {})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
initially="DEFERRED",
deferrable=True,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
initially=None,
)
@config.requirements.fk_deferrable
@config.requirements.fk_initially
def test_add_initially_immediate_plus_deferrable(self):
diffs = self._fk_opts_fixture(
{}, {"initially": "immediate", "deferrable": True}
)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
initially=None,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
initially="immediate",
deferrable=True,
)
@config.requirements.fk_deferrable
@config.requirements.fk_initially
def test_remove_initially_immediate_plus_deferrable(self):
diffs = self._fk_opts_fixture(
{"initially": "immediate", "deferrable": True}, {}
)
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
initially=None, # immediate is the default
deferrable=True,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
initially=None,
deferrable=None,
)
@config.requirements.fk_initially
@config.requirements.fk_deferrable
def test_add_initially_deferrable_nochange_one(self):
diffs = self._fk_opts_fixture(
{"deferrable": True, "initially": "immediate"},
{"deferrable": True, "initially": "immediate"},
)
eq_(diffs, [])
@config.requirements.fk_initially
@config.requirements.fk_deferrable
def test_add_initially_deferrable_nochange_two(self):
diffs = self._fk_opts_fixture(
{"deferrable": True, "initially": "deferred"},
{"deferrable": True, "initially": "deferred"},
)
eq_(diffs, [])
@config.requirements.fk_initially
@config.requirements.fk_deferrable
def test_add_initially_deferrable_nochange_three(self):
diffs = self._fk_opts_fixture(
{"deferrable": None, "initially": "deferred"},
{"deferrable": None, "initially": "deferred"},
)
eq_(diffs, [])
@config.requirements.fk_deferrable
def test_add_deferrable(self):
diffs = self._fk_opts_fixture({}, {"deferrable": True})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
deferrable=None,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
deferrable=True,
)
@config.requirements.fk_deferrable_is_reflected
def test_remove_deferrable(self):
diffs = self._fk_opts_fixture({"deferrable": True}, {})
self._assert_fk_diff(
diffs[0],
"remove_fk",
"user",
["tid"],
"some_table",
["id"],
deferrable=True,
conditional_name="servergenerated",
)
self._assert_fk_diff(
diffs[1],
"add_fk",
"user",
["tid"],
"some_table",
["id"],
deferrable=None,
)
|
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from alembic.util import sqla_compat
from ._autogen_fixtures import AutogenFixtureTest
from ... import testing
from ...testing import config
from ...testing import eq_
from ...testing import is_true
from ...testing import TestBase
class AutogenerateIdentityTest(AutogenFixtureTest, TestBase):
__requires__ = ("identity_columns",)
__backend__ = True
def test_add_identity_column(self):
m1 = MetaData()
m2 = MetaData()
Table("user", m1, Column("other", sa.Text))
Table(
"user",
m2,
Column("other", sa.Text),
Column(
"id",
Integer,
sa.Identity(start=5, increment=7),
primary_key=True,
),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_column")
eq_(diffs[0][2], "user")
eq_(diffs[0][3].name, "id")
i = diffs[0][3].identity
is_true(isinstance(i, sa.Identity))
eq_(i.start, 5)
eq_(i.increment, 7)
def test_remove_identity_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column(
"id",
Integer,
sa.Identity(start=2, increment=3),
primary_key=True,
),
)
Table("user", m2)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_column")
eq_(diffs[0][2], "user")
c = diffs[0][3]
eq_(c.name, "id")
is_true(isinstance(c.identity, sa.Identity))
eq_(c.identity.start, 2)
eq_(c.identity.increment, 3)
def test_no_change_identity_column(self):
m1 = MetaData()
m2 = MetaData()
for m in (m1, m2):
id_ = sa.Identity(start=2)
Table("user", m, Column("id", Integer, id_))
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_dialect_kwargs_changes(self):
m1 = MetaData()
m2 = MetaData()
if sqla_compat.identity_has_dialect_kwargs:
args = {"oracle_on_null": True, "oracle_order": True}
else:
args = {"on_null": True, "order": True}
Table("user", m1, Column("id", Integer, sa.Identity(start=2)))
id_ = sa.Identity(start=2, **args)
Table("user", m2, Column("id", Integer, id_))
diffs = self._fixture(m1, m2)
if config.db.name == "oracle":
            eq_(len(diffs), 1)
eq_(diffs[0][0][0], "modify_default")
else:
eq_(diffs, [])
@testing.combinations(
(None, dict(start=2)),
(dict(start=2), None),
(dict(start=2), dict(start=2, increment=7)),
(dict(always=False), dict(always=True)),
(
dict(start=1, minvalue=0, maxvalue=100, cycle=True),
dict(start=1, minvalue=0, maxvalue=100, cycle=False),
),
(
dict(start=10, increment=3, maxvalue=9999),
dict(start=10, increment=1, maxvalue=3333),
),
)
@config.requirements.identity_columns_alter
def test_change_identity(self, before, after):
arg_before = (sa.Identity(**before),) if before else ()
arg_after = (sa.Identity(**after),) if after else ()
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer, *arg_before),
Column("other", sa.Text),
)
Table(
"user",
m2,
Column("id", Integer, *arg_after),
Column("other", sa.Text),
)
diffs = self._fixture(m1, m2)
eq_(len(diffs[0]), 1)
diffs = diffs[0][0]
eq_(diffs[0], "modify_default")
eq_(diffs[2], "user")
eq_(diffs[3], "id")
old = diffs[5]
new = diffs[6]
def check(kw, idt):
if kw:
is_true(isinstance(idt, sa.Identity))
for k, v in kw.items():
eq_(getattr(idt, k), v)
else:
is_true(idt in (None, False))
check(before, old)
check(after, new)
def test_add_identity_to_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer),
Column("other", sa.Text),
)
Table(
"user",
m2,
Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
Column("other", sa.Text),
)
diffs = self._fixture(m1, m2)
eq_(len(diffs[0]), 1)
diffs = diffs[0][0]
eq_(diffs[0], "modify_default")
eq_(diffs[2], "user")
eq_(diffs[3], "id")
eq_(diffs[5], None)
added = diffs[6]
is_true(isinstance(added, sa.Identity))
eq_(added.start, 2)
eq_(added.maxvalue, 1000)
def test_remove_identity_from_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
Column("other", sa.Text),
)
Table(
"user",
m2,
Column("id", Integer),
Column("other", sa.Text),
)
diffs = self._fixture(m1, m2)
eq_(len(diffs[0]), 1)
diffs = diffs[0][0]
eq_(diffs[0], "modify_default")
eq_(diffs[2], "user")
eq_(diffs[3], "id")
eq_(diffs[6], None)
removed = diffs[5]
is_true(isinstance(removed, sa.Identity))
|
import io
from ...migration import MigrationContext
from ...testing import assert_raises
from ...testing import config
from ...testing import eq_
from ...testing import is_
from ...testing import is_false
from ...testing import is_not_
from ...testing import is_true
from ...testing import ne_
from ...testing.fixtures import TestBase
class MigrationTransactionTest(TestBase):
__backend__ = True
conn = None
def _fixture(self, opts):
self.conn = conn = config.db.connect()
if opts.get("as_sql", False):
self.context = MigrationContext.configure(
dialect=conn.dialect, opts=opts
)
self.context.output_buffer = self.context.impl.output_buffer = (
io.StringIO()
)
else:
self.context = MigrationContext.configure(
connection=conn, opts=opts
)
return self.context
def teardown_method(self):
if self.conn:
self.conn.close()
def test_proxy_transaction_rollback(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
is_false(self.conn.in_transaction())
proxy = context.begin_transaction(_per_migration=True)
is_true(self.conn.in_transaction())
proxy.rollback()
is_false(self.conn.in_transaction())
def test_proxy_transaction_commit(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
proxy = context.begin_transaction(_per_migration=True)
is_true(self.conn.in_transaction())
proxy.commit()
is_false(self.conn.in_transaction())
def test_proxy_transaction_contextmanager_commit(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
proxy = context.begin_transaction(_per_migration=True)
is_true(self.conn.in_transaction())
with proxy:
pass
is_false(self.conn.in_transaction())
def test_proxy_transaction_contextmanager_rollback(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
proxy = context.begin_transaction(_per_migration=True)
is_true(self.conn.in_transaction())
def go():
with proxy:
raise Exception("hi")
assert_raises(Exception, go)
is_false(self.conn.in_transaction())
def test_proxy_transaction_contextmanager_explicit_rollback(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
proxy = context.begin_transaction(_per_migration=True)
is_true(self.conn.in_transaction())
with proxy:
is_true(self.conn.in_transaction())
proxy.rollback()
is_false(self.conn.in_transaction())
is_false(self.conn.in_transaction())
def test_proxy_transaction_contextmanager_explicit_commit(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
proxy = context.begin_transaction(_per_migration=True)
is_true(self.conn.in_transaction())
with proxy:
is_true(self.conn.in_transaction())
proxy.commit()
is_false(self.conn.in_transaction())
is_false(self.conn.in_transaction())
def test_transaction_per_migration_transactional_ddl(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": True}
)
is_false(self.conn.in_transaction())
with context.begin_transaction():
is_false(self.conn.in_transaction())
with context.begin_transaction(_per_migration=True):
is_true(self.conn.in_transaction())
is_false(self.conn.in_transaction())
is_false(self.conn.in_transaction())
def test_transaction_per_migration_non_transactional_ddl(self):
context = self._fixture(
{"transaction_per_migration": True, "transactional_ddl": False}
)
is_false(self.conn.in_transaction())
with context.begin_transaction():
is_false(self.conn.in_transaction())
with context.begin_transaction(_per_migration=True):
is_true(self.conn.in_transaction())
is_false(self.conn.in_transaction())
is_false(self.conn.in_transaction())
def test_transaction_per_all_transactional_ddl(self):
context = self._fixture({"transactional_ddl": True})
is_false(self.conn.in_transaction())
with context.begin_transaction():
is_true(self.conn.in_transaction())
with context.begin_transaction(_per_migration=True):
is_true(self.conn.in_transaction())
is_true(self.conn.in_transaction())
is_false(self.conn.in_transaction())
def test_transaction_per_all_non_transactional_ddl(self):
context = self._fixture({"transactional_ddl": False})
is_false(self.conn.in_transaction())
with context.begin_transaction():
is_false(self.conn.in_transaction())
with context.begin_transaction(_per_migration=True):
is_true(self.conn.in_transaction())
is_false(self.conn.in_transaction())
is_false(self.conn.in_transaction())
def test_transaction_per_all_sqlmode(self):
context = self._fixture({"as_sql": True})
context.execute("step 1")
with context.begin_transaction():
context.execute("step 2")
with context.begin_transaction(_per_migration=True):
context.execute("step 3")
context.execute("step 4")
context.execute("step 5")
if context.impl.transactional_ddl:
self._assert_impl_steps(
"step 1",
"BEGIN",
"step 2",
"step 3",
"step 4",
"COMMIT",
"step 5",
)
else:
self._assert_impl_steps(
"step 1", "step 2", "step 3", "step 4", "step 5"
)
def test_transaction_per_migration_sqlmode(self):
context = self._fixture(
{"as_sql": True, "transaction_per_migration": True}
)
context.execute("step 1")
with context.begin_transaction():
context.execute("step 2")
with context.begin_transaction(_per_migration=True):
context.execute("step 3")
context.execute("step 4")
context.execute("step 5")
if context.impl.transactional_ddl:
self._assert_impl_steps(
"step 1",
"step 2",
"BEGIN",
"step 3",
"COMMIT",
"step 4",
"step 5",
)
else:
self._assert_impl_steps(
"step 1", "step 2", "step 3", "step 4", "step 5"
)
@config.requirements.autocommit_isolation
def test_autocommit_block(self):
context = self._fixture({"transaction_per_migration": True})
is_false(self.conn.in_transaction())
with context.begin_transaction():
is_false(self.conn.in_transaction())
with context.begin_transaction(_per_migration=True):
is_true(self.conn.in_transaction())
with context.autocommit_block():
                    # in 1.x, self.conn is a separate connection due to the
                    # execution_options call; with "future" engines they are
                    # the same connection, and a "transaction" block exists
                    # despite autocommit
if self.is_sqlalchemy_future:
is_(context.connection, self.conn)
else:
is_not_(context.connection, self.conn)
is_false(self.conn.in_transaction())
eq_(
context.connection._execution_options[
"isolation_level"
],
"AUTOCOMMIT",
)
ne_(
context.connection._execution_options.get(
"isolation_level", None
),
"AUTOCOMMIT",
)
is_true(self.conn.in_transaction())
is_false(self.conn.in_transaction())
is_false(self.conn.in_transaction())
@config.requirements.autocommit_isolation
def test_autocommit_block_no_transaction(self):
context = self._fixture({"transaction_per_migration": True})
is_false(self.conn.in_transaction())
with context.autocommit_block():
is_true(context.connection.in_transaction())
            # in 1.x, self.conn is a separate connection due to the
            # execution_options call; with "future" engines they are the
            # same connection, and a "transaction" block exists despite
            # autocommit
if self.is_sqlalchemy_future:
is_(context.connection, self.conn)
else:
is_not_(context.connection, self.conn)
is_false(self.conn.in_transaction())
eq_(
context.connection._execution_options["isolation_level"],
"AUTOCOMMIT",
)
ne_(
context.connection._execution_options.get("isolation_level", None),
"AUTOCOMMIT",
)
is_false(self.conn.in_transaction())
def test_autocommit_block_transactional_ddl_sqlmode(self):
context = self._fixture(
{
"transaction_per_migration": True,
"transactional_ddl": True,
"as_sql": True,
}
)
with context.begin_transaction():
context.execute("step 1")
with context.begin_transaction(_per_migration=True):
context.execute("step 2")
with context.autocommit_block():
context.execute("step 3")
context.execute("step 4")
context.execute("step 5")
self._assert_impl_steps(
"step 1",
"BEGIN",
"step 2",
"COMMIT",
"step 3",
"BEGIN",
"step 4",
"COMMIT",
"step 5",
)
def test_autocommit_block_nontransactional_ddl_sqlmode(self):
context = self._fixture(
{
"transaction_per_migration": True,
"transactional_ddl": False,
"as_sql": True,
}
)
with context.begin_transaction():
context.execute("step 1")
with context.begin_transaction(_per_migration=True):
context.execute("step 2")
with context.autocommit_block():
context.execute("step 3")
context.execute("step 4")
context.execute("step 5")
self._assert_impl_steps(
"step 1", "step 2", "step 3", "step 4", "step 5"
)
    def _assert_impl_steps(self, *steps):
        # replay the expected steps through the impl into a fresh buffer and
        # compare against the output captured while the test body ran
        to_check = self.context.output_buffer.getvalue()
self.context.impl.output_buffer = buf = io.StringIO()
for step in steps:
if step == "BEGIN":
self.context.impl.emit_begin()
elif step == "COMMIT":
self.context.impl.emit_commit()
else:
self.context.impl._exec(step)
eq_(to_check, buf.getvalue())
|
"""Test against the builders in the op.* module."""
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.sql import text
from ...testing.fixtures import AlterColRoundTripFixture
from ...testing.fixtures import TestBase
@event.listens_for(Table, "after_parent_attach")
def _add_cols(table, metadata):
if table.name == "tbl_with_auto_appended_column":
table.append_column(Column("bat", Integer))
class BackendAlterColumnTest(AlterColRoundTripFixture, TestBase):
__backend__ = True
def test_rename_column(self):
self._run_alter_col({}, {"name": "newname"})
def test_modify_type_int_str(self):
self._run_alter_col({"type": Integer()}, {"type": String(50)})
def test_add_server_default_int(self):
self._run_alter_col({"type": Integer}, {"server_default": text("5")})
def test_modify_server_default_int(self):
self._run_alter_col(
{"type": Integer, "server_default": text("2")},
{"server_default": text("5")},
)
def test_modify_nullable_to_non(self):
self._run_alter_col({}, {"nullable": False})
def test_modify_non_nullable_to_nullable(self):
self._run_alter_col({"nullable": False}, {"nullable": True})
|
from __future__ import annotations
from typing import Any
from typing import Dict
from typing import Set
from sqlalchemy import CHAR
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Numeric
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import UniqueConstraint
from ... import autogenerate
from ... import util
from ...autogenerate import api
from ...ddl.base import _fk_spec
from ...migration import MigrationContext
from ...operations import ops
from ...testing import config
from ...testing import eq_
from ...testing.env import clear_staging_env
from ...testing.env import staging_env
names_in_this_test: Set[Any] = set()
@event.listens_for(Table, "after_parent_attach")
def new_table(table, parent):
names_in_this_test.add(table.name)
def _default_include_object(obj, name, type_, reflected, compare_to):
if type_ == "table":
return name in names_in_this_test
else:
return True
_default_object_filters: Any = _default_include_object
_default_name_filters: Any = None
class ModelOne:
__requires__ = ("unique_constraint_reflection",)
schema: Any = None
@classmethod
def _get_db_schema(cls):
schema = cls.schema
m = MetaData(schema=schema)
Table(
"user",
m,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
Column("a1", Text),
Column("pw", String(50)),
Index("pw_idx", "pw"),
)
Table(
"address",
m,
Column("id", Integer, primary_key=True),
Column("email_address", String(100), nullable=False),
)
Table(
"order",
m,
Column("order_id", Integer, primary_key=True),
Column(
"amount",
Numeric(8, 2),
nullable=False,
server_default=text("0"),
),
CheckConstraint("amount >= 0", name="ck_order_amount"),
)
Table(
"extra",
m,
Column("x", CHAR),
Column("uid", Integer, ForeignKey("user.id")),
)
return m
@classmethod
def _get_model_schema(cls):
schema = cls.schema
m = MetaData(schema=schema)
Table(
"user",
m,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", Text, server_default="x"),
)
Table(
"address",
m,
Column("id", Integer, primary_key=True),
Column("email_address", String(100), nullable=False),
Column("street", String(50)),
UniqueConstraint("email_address", name="uq_email"),
)
Table(
"order",
m,
Column("order_id", Integer, primary_key=True),
Column(
"amount",
Numeric(10, 2),
nullable=True,
server_default=text("0"),
),
Column("user_id", Integer, ForeignKey("user.id")),
CheckConstraint("amount > -1", name="ck_order_amount"),
)
Table(
"item",
m,
Column("id", Integer, primary_key=True),
Column("description", String(100)),
Column("order_id", Integer, ForeignKey("order.order_id")),
CheckConstraint("len(description) > 5"),
)
return m
class _ComparesFKs:
def _assert_fk_diff(
self,
diff,
type_,
source_table,
source_columns,
target_table,
target_columns,
name=None,
conditional_name=None,
source_schema=None,
onupdate=None,
ondelete=None,
initially=None,
deferrable=None,
):
# the public API for ForeignKeyConstraint was not very rich
# in 0.7, 0.8, so here we use the well-known but slightly
# private API to get at its elements
(
fk_source_schema,
fk_source_table,
fk_source_columns,
fk_target_schema,
fk_target_table,
fk_target_columns,
fk_onupdate,
fk_ondelete,
fk_deferrable,
fk_initially,
) = _fk_spec(diff[1])
eq_(diff[0], type_)
eq_(fk_source_table, source_table)
eq_(fk_source_columns, source_columns)
eq_(fk_target_table, target_table)
eq_(fk_source_schema, source_schema)
eq_(fk_onupdate, onupdate)
eq_(fk_ondelete, ondelete)
eq_(fk_initially, initially)
eq_(fk_deferrable, deferrable)
eq_([elem.column.name for elem in diff[1].elements], target_columns)
if conditional_name is not None:
if conditional_name == "servergenerated":
fks = inspect(self.bind).get_foreign_keys(source_table)
server_fk_name = fks[0]["name"]
eq_(diff[1].name, server_fk_name)
else:
eq_(diff[1].name, conditional_name)
else:
eq_(diff[1].name, name)
class AutogenTest(_ComparesFKs):
def _flatten_diffs(self, diffs):
for d in diffs:
if isinstance(d, list):
yield from self._flatten_diffs(d)
else:
yield d
@classmethod
def _get_bind(cls):
return config.db
configure_opts: Dict[Any, Any] = {}
@classmethod
def setup_class(cls):
staging_env()
cls.bind = cls._get_bind()
cls.m1 = cls._get_db_schema()
cls.m1.create_all(cls.bind)
cls.m2 = cls._get_model_schema()
@classmethod
def teardown_class(cls):
cls.m1.drop_all(cls.bind)
clear_staging_env()
def setUp(self):
self.conn = conn = self.bind.connect()
ctx_opts = {
"compare_type": True,
"compare_server_default": True,
"target_metadata": self.m2,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
"alembic_module_prefix": "op.",
"sqlalchemy_module_prefix": "sa.",
"include_object": _default_object_filters,
"include_name": _default_name_filters,
}
if self.configure_opts:
ctx_opts.update(self.configure_opts)
self.context = context = MigrationContext.configure(
connection=conn, opts=ctx_opts
)
self.autogen_context = api.AutogenContext(context, self.m2)
def tearDown(self):
self.conn.close()
def _update_context(
self, object_filters=None, name_filters=None, include_schemas=None
):
if include_schemas is not None:
self.autogen_context.opts["include_schemas"] = include_schemas
if object_filters is not None:
self.autogen_context._object_filters = [object_filters]
if name_filters is not None:
self.autogen_context._name_filters = [name_filters]
return self.autogen_context
class AutogenFixtureTest(_ComparesFKs):
def _fixture(
self,
m1,
m2,
include_schemas=False,
opts=None,
object_filters=_default_object_filters,
name_filters=_default_name_filters,
return_ops=False,
max_identifier_length=None,
):
if max_identifier_length:
dialect = self.bind.dialect
existing_length = dialect.max_identifier_length
dialect.max_identifier_length = (
dialect._user_defined_max_identifier_length
) = max_identifier_length
try:
self._alembic_metadata, model_metadata = m1, m2
for m in util.to_list(self._alembic_metadata):
m.create_all(self.bind)
with self.bind.connect() as conn:
ctx_opts = {
"compare_type": True,
"compare_server_default": True,
"target_metadata": model_metadata,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
"alembic_module_prefix": "op.",
"sqlalchemy_module_prefix": "sa.",
"include_object": object_filters,
"include_name": name_filters,
"include_schemas": include_schemas,
}
if opts:
ctx_opts.update(opts)
self.context = context = MigrationContext.configure(
connection=conn, opts=ctx_opts
)
autogen_context = api.AutogenContext(context, model_metadata)
uo = ops.UpgradeOps(ops=[])
autogenerate._produce_net_changes(autogen_context, uo)
if return_ops:
return uo
else:
return uo.as_diffs()
finally:
if max_identifier_length:
dialect = self.bind.dialect
dialect.max_identifier_length = (
dialect._user_defined_max_identifier_length
) = existing_length
def setUp(self):
staging_env()
self.bind = config.db
def tearDown(self):
if hasattr(self, "_alembic_metadata"):
for m in util.to_list(self._alembic_metadata):
m.drop_all(self.bind)
clear_staging_env()
|
from .test_autogen_comments import * # noqa
from .test_autogen_computed import * # noqa
from .test_autogen_diffs import * # noqa
from .test_autogen_fks import * # noqa
from .test_autogen_identity import * # noqa
from .test_environment import * # noqa
from .test_op import * # noqa
|
# mypy: no-warn-unused-ignores
from __future__ import annotations
from configparser import ConfigParser
import io
import os
import sys
import typing
from typing import Any
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
if True:
# zimports hack for too-long names
from sqlalchemy.util import ( # noqa: F401
inspect_getfullargspec as inspect_getfullargspec,
)
from sqlalchemy.util.compat import ( # noqa: F401
inspect_formatargspec as inspect_formatargspec,
)
is_posix = os.name == "posix"
py313 = sys.version_info >= (3, 13)
py311 = sys.version_info >= (3, 11)
py310 = sys.version_info >= (3, 10)
py39 = sys.version_info >= (3, 9)
# produce a wrapper that allows encoded text to stream
# into a given buffer, but doesn't close it.
# not sure of a more idiomatic approach to this.
class EncodedIO(io.TextIOWrapper):
def close(self) -> None:
pass
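# Illustrative sketch, not part of the module: the overridden close() lets
# the text wrapper be discarded without closing the caller's buffer.
def _demo_encoded_io() -> bytes:
    buf = io.BytesIO()
    wrapper = EncodedIO(buf, encoding="utf-8")
    wrapper.write("héllo")  # text in ...
    wrapper.flush()  # ... encoded bytes out
    wrapper.close()  # a no-op by design; buf stays usable
    return buf.getvalue()  # -> b'h\xc3\xa9llo'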
if py39:
from importlib import resources as _resources
importlib_resources = _resources
from importlib import metadata as _metadata
importlib_metadata = _metadata
from importlib.metadata import EntryPoint as EntryPoint
else:
import importlib_resources # type:ignore # noqa
import importlib_metadata # type:ignore # noqa
from importlib_metadata import EntryPoint # type:ignore # noqa
def importlib_metadata_get(group: str) -> Sequence[EntryPoint]:
ep = importlib_metadata.entry_points()
if hasattr(ep, "select"):
return ep.select(group=group)
else:
return ep.get(group, ()) # type: ignore
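# Illustrative sketch, not part of the module: enumerate entry points in a
# way that works with both the newer select() API and the older dict-style
# return; "console_scripts" is just an example group.
def _demo_entry_point_names() -> List[str]:
    return [ep.name for ep in importlib_metadata_get("console_scripts")]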
def formatannotation_fwdref(
annotation: Any, base_module: Optional[Any] = None
) -> str:
"""vendored from python 3.7"""
# copied over _formatannotation from sqlalchemy 2.0
if isinstance(annotation, str):
return annotation
if getattr(annotation, "__module__", None) == "typing":
return repr(annotation).replace("typing.", "").replace("~", "")
if isinstance(annotation, type):
if annotation.__module__ in ("builtins", base_module):
return repr(annotation.__qualname__)
return annotation.__module__ + "." + annotation.__qualname__
    # TypeVars and any remaining objects both reduce to repr() with the
    # leading "~" stripped
    return repr(annotation).replace("~", "")
def read_config_parser(
file_config: ConfigParser,
file_argument: Sequence[Union[str, os.PathLike[str]]],
) -> List[str]:
if py310:
return file_config.read(file_argument, encoding="locale")
else:
return file_config.read(file_argument)
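# Illustrative sketch, not part of the module: on Python 3.10+ the file is
# read with the explicit "locale" encoding (PEP 597), while earlier
# interpreters use ConfigParser's default; "alembic.ini" is an assumed path.
def _demo_read_ini() -> ConfigParser:
    cfg = ConfigParser()
    read_config_parser(cfg, ["alembic.ini"])
    return cfg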
|
from __future__ import annotations
import os
from os.path import exists
from os.path import join
from os.path import splitext
from subprocess import check_call
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from .compat import is_posix
from .exc import CommandError
def open_in_editor(
filename: str, environ: Optional[Dict[str, str]] = None
) -> None:
"""
Opens the given file in a text editor. If the environment variable
``EDITOR`` is set, this is taken as preference.
Otherwise, a list of commonly installed editors is tried.
If no editor matches, an :py:exc:`OSError` is raised.
:param filename: The filename to open. Will be passed verbatim to the
editor command.
:param environ: An optional drop-in replacement for ``os.environ``. Used
mainly for testing.
"""
env = os.environ if environ is None else environ
try:
editor = _find_editor(env)
check_call([editor, filename])
except Exception as exc:
raise CommandError("Error executing editor (%s)" % (exc,)) from exc
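# Illustrative sketch, not part of the module: passing ``environ`` keeps
# editor resolution testable without touching os.environ; "vim" and the
# filename are assumptions.
def _demo_open_in_editor() -> None:
    try:
        open_in_editor(
            "example_migration.py",
            environ={"EDITOR": "vim", "PATH": "/usr/bin:/bin"},
        )
    except CommandError as err:
        print("editor launch failed:", err)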
def _find_editor(environ: Mapping[str, str]) -> str:
candidates = _default_editors()
for i, var in enumerate(("EDITOR", "VISUAL")):
if var in environ:
user_choice = environ[var]
if exists(user_choice):
return user_choice
if os.sep not in user_choice:
candidates.insert(i, user_choice)
for candidate in candidates:
path = _find_executable(candidate, environ)
if path is not None:
return path
raise OSError(
"No suitable editor found. Please set the "
'"EDITOR" or "VISUAL" environment variables'
)
def _find_executable(
candidate: str, environ: Mapping[str, str]
) -> Optional[str]:
    # Assuming this is on the PATH, we need to determine its absolute
    # location; otherwise, ``check_call`` will fail
if not is_posix and splitext(candidate)[1] != ".exe":
candidate += ".exe"
for path in environ.get("PATH", "").split(os.pathsep):
value = join(path, candidate)
if exists(value):
return value
return None
def _default_editors() -> List[str]:
    # Look for an editor. Prefer the user's choice by env-var, falling back
    # to commonly installed editors (nano/vim/code)
if is_posix:
return ["sensible-editor", "editor", "nano", "vim", "code"]
else:
return ["code.exe", "notepad++.exe", "notepad.exe"]
|
from __future__ import annotations
from typing import Any
from typing import List
from typing import Tuple
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from alembic.autogenerate import RevisionContext
class CommandError(Exception):
pass
class AutogenerateDiffsDetected(CommandError):
def __init__(
self,
message: str,
revision_context: RevisionContext,
diffs: List[Tuple[Any, ...]],
) -> None:
super().__init__(message)
self.revision_context = revision_context
self.diffs = diffs
|
from __future__ import annotations
import collections
from collections.abc import Iterable
import textwrap
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import uuid
import warnings
from sqlalchemy.util import asbool as asbool # noqa: F401
from sqlalchemy.util import immutabledict as immutabledict # noqa: F401
from sqlalchemy.util import to_list as to_list # noqa: F401
from sqlalchemy.util import unique_list as unique_list
from .compat import inspect_getfullargspec
if True:
# zimports workaround :(
from sqlalchemy.util import ( # noqa: F401
memoized_property as memoized_property,
)
EMPTY_DICT: Mapping[Any, Any] = immutabledict()
_T = TypeVar("_T", bound=Any)
_C = TypeVar("_C", bound=Callable[..., Any])
class _ModuleClsMeta(type):
    # keep any module-level proxy functions in sync when a method is later
    # patched onto the class
    def __setattr__(cls, key: str, value: Callable[..., Any]) -> None:
        super().__setattr__(key, value)
        cls._update_module_proxies(key)  # type: ignore
class ModuleClsProxy(metaclass=_ModuleClsMeta):
"""Create module level proxy functions for the
methods on a given class.
The functions will have a compatible signature
as the methods.
"""
_setups: Dict[
Type[Any],
Tuple[
Set[str],
List[Tuple[MutableMapping[str, Any], MutableMapping[str, Any]]],
],
] = collections.defaultdict(lambda: (set(), []))
@classmethod
def _update_module_proxies(cls, name: str) -> None:
attr_names, modules = cls._setups[cls]
for globals_, locals_ in modules:
cls._add_proxied_attribute(name, globals_, locals_, attr_names)
def _install_proxy(self) -> None:
attr_names, modules = self._setups[self.__class__]
for globals_, locals_ in modules:
globals_["_proxy"] = self
for attr_name in attr_names:
globals_[attr_name] = getattr(self, attr_name)
def _remove_proxy(self) -> None:
attr_names, modules = self._setups[self.__class__]
for globals_, locals_ in modules:
globals_["_proxy"] = None
for attr_name in attr_names:
del globals_[attr_name]
@classmethod
def create_module_class_proxy(
cls,
globals_: MutableMapping[str, Any],
locals_: MutableMapping[str, Any],
) -> None:
attr_names, modules = cls._setups[cls]
modules.append((globals_, locals_))
cls._setup_proxy(globals_, locals_, attr_names)
@classmethod
def _setup_proxy(
cls,
globals_: MutableMapping[str, Any],
locals_: MutableMapping[str, Any],
attr_names: Set[str],
) -> None:
for methname in dir(cls):
cls._add_proxied_attribute(methname, globals_, locals_, attr_names)
@classmethod
def _add_proxied_attribute(
cls,
methname: str,
globals_: MutableMapping[str, Any],
locals_: MutableMapping[str, Any],
attr_names: Set[str],
) -> None:
if not methname.startswith("_"):
meth = getattr(cls, methname)
if callable(meth):
locals_[methname] = cls._create_method_proxy(
methname, globals_, locals_
)
else:
attr_names.add(methname)
@classmethod
def _create_method_proxy(
cls,
name: str,
globals_: MutableMapping[str, Any],
locals_: MutableMapping[str, Any],
) -> Callable[..., Any]:
fn = getattr(cls, name)
def _name_error(name: str, from_: Exception) -> NoReturn:
raise NameError(
"Can't invoke function '%s', as the proxy object has "
"not yet been "
"established for the Alembic '%s' class. "
"Try placing this code inside a callable."
% (name, cls.__name__)
) from from_
globals_["_name_error"] = _name_error
translations = getattr(fn, "_legacy_translations", [])
if translations:
spec = inspect_getfullargspec(fn)
if spec[0] and spec[0][0] == "self":
spec[0].pop(0)
outer_args = inner_args = "*args, **kw"
translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % (
fn.__name__,
tuple(spec),
translations,
)
def translate(
fn_name: str, spec: Any, translations: Any, args: Any, kw: Any
) -> Any:
return_kw = {}
return_args = []
for oldname, newname in translations:
if oldname in kw:
warnings.warn(
"Argument %r is now named %r "
"for method %s()." % (oldname, newname, fn_name)
)
return_kw[newname] = kw.pop(oldname)
return_kw.update(kw)
args = list(args)
if spec[3]:
pos_only = spec[0][: -len(spec[3])]
else:
pos_only = spec[0]
for arg in pos_only:
if arg not in return_kw:
try:
return_args.append(args.pop(0))
except IndexError:
raise TypeError(
"missing required positional argument: %s"
% arg
)
return_args.extend(args)
return return_args, return_kw
globals_["_translate"] = translate
else:
outer_args = "*args, **kw"
inner_args = "*args, **kw"
translate_str = ""
func_text = textwrap.dedent(
"""\
def %(name)s(%(args)s):
%(doc)r
%(translate)s
try:
p = _proxy
except NameError as ne:
_name_error('%(name)s', ne)
return _proxy.%(name)s(%(apply_kw)s)
"""
% {
"name": name,
"translate": translate_str,
"args": outer_args,
"apply_kw": inner_args,
"doc": fn.__doc__,
}
)
lcl: MutableMapping[str, Any] = {}
exec(func_text, cast("Dict[str, Any]", globals_), lcl)
return cast("Callable[..., Any]", lcl[name])
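# Illustrative sketch, not part of Alembic's API: this mirrors how the
# ``alembic.op`` module exposes Operations methods at module level.
# ``Greeter`` and ``greet`` are hypothetical names for demonstration.
def _demo_module_cls_proxy() -> str:
    class Greeter(ModuleClsProxy):
        def greet(self, name: str) -> str:
            """Return a greeting."""
            return "hello " + name

    ns: Dict[str, Any] = {}
    Greeter.create_module_class_proxy(ns, ns)  # populate proxy functions
    Greeter()._install_proxy()  # bind an instance as the live target
    return ns["greet"]("alembic")  # -> "hello alembic"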
def _with_legacy_names(translations: Any) -> Any:
def decorate(fn: _C) -> _C:
fn._legacy_translations = translations # type: ignore[attr-defined]
return fn
return decorate
def rev_id() -> str:
    # twelve hex characters taken from a random UUID; the default scheme
    # for generating revision identifiers
    return uuid.uuid4().hex[-12:]
@overload
def to_tuple(x: Any, default: Tuple[Any, ...]) -> Tuple[Any, ...]: ...
@overload
def to_tuple(x: None, default: Optional[_T] = ...) -> _T: ...
@overload
def to_tuple(
x: Any, default: Optional[Tuple[Any, ...]] = None
) -> Tuple[Any, ...]: ...
def to_tuple(
x: Any, default: Optional[Tuple[Any, ...]] = None
) -> Optional[Tuple[Any, ...]]:
if x is None:
return default
elif isinstance(x, str):
return (x,)
elif isinstance(x, Iterable):
return tuple(x)
else:
return (x,)
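# Behavior sketch (illustrative):
#   to_tuple(None)     -> None (or the supplied default)
#   to_tuple("ab")     -> ("ab",)   strings are not iterated
#   to_tuple([1, 2])   -> (1, 2)
#   to_tuple(5)        -> (5,)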
def dedupe_tuple(tup: Tuple[str, ...]) -> Tuple[str, ...]:
return tuple(unique_list(tup))
class Dispatcher:
def __init__(self, uselist: bool = False) -> None:
self._registry: Dict[Tuple[Any, ...], Any] = {}
self.uselist = uselist
def dispatch_for(
self, target: Any, qualifier: str = "default"
) -> Callable[[_C], _C]:
def decorate(fn: _C) -> _C:
if self.uselist:
self._registry.setdefault((target, qualifier), []).append(fn)
else:
assert (target, qualifier) not in self._registry
self._registry[(target, qualifier)] = fn
return fn
return decorate
def dispatch(self, obj: Any, qualifier: str = "default") -> Any:
if isinstance(obj, str):
targets: Sequence[Any] = [obj]
elif isinstance(obj, type):
targets = obj.__mro__
else:
targets = type(obj).__mro__
for spcls in targets:
if qualifier != "default" and (spcls, qualifier) in self._registry:
return self._fn_or_list(self._registry[(spcls, qualifier)])
elif (spcls, "default") in self._registry:
return self._fn_or_list(self._registry[(spcls, "default")])
else:
raise ValueError("no dispatch function for object: %s" % obj)
def _fn_or_list(
self, fn_or_list: Union[List[Callable[..., Any]], Callable[..., Any]]
) -> Callable[..., Any]:
if self.uselist:
def go(*arg: Any, **kw: Any) -> None:
if TYPE_CHECKING:
assert isinstance(fn_or_list, Sequence)
for fn in fn_or_list:
fn(*arg, **kw)
return go
else:
return fn_or_list # type: ignore
def branch(self) -> Dispatcher:
"""Return a copy of this dispatcher that is independently
writable."""
        d = Dispatcher(uselist=self.uselist)
if self.uselist:
d._registry.update(
(k, [fn for fn in self._registry[k]]) for k in self._registry
)
else:
d._registry.update(self._registry)
return d
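# Illustrative sketch, not part of Alembic's API: handlers are registered
# per lookup key and resolved along an instance's MRO; ``handle_int`` and
# ``handle_default`` are hypothetical names.
def _demo_dispatcher() -> str:
    dispatcher = Dispatcher()

    @dispatcher.dispatch_for(int)
    def handle_int(obj: Any) -> str:
        return "int:%s" % obj

    @dispatcher.dispatch_for(object)
    def handle_default(obj: Any) -> str:
        return "obj:%r" % obj

    # bool's MRO is (bool, int, object), so the int handler wins
    return dispatcher.dispatch(True)(True)  # -> "int:True"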
def not_none(value: Optional[_T]) -> _T:
assert value is not None
return value
|
from __future__ import annotations
from collections.abc import Iterable
from contextlib import contextmanager
import logging
import sys
import textwrap
from typing import Iterator
from typing import Optional
from typing import TextIO
from typing import Union
import warnings
from sqlalchemy.engine import url
log = logging.getLogger(__name__)
# disable "no handler found" errors
logging.getLogger("alembic").addHandler(logging.NullHandler())
try:
import fcntl
import termios
import struct
ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0))
_h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl)
if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty
TERMWIDTH = None
except (ImportError, OSError):
TERMWIDTH = None
def write_outstream(
stream: TextIO, *text: Union[str, bytes], quiet: bool = False
) -> None:
if quiet:
return
encoding = getattr(stream, "encoding", "ascii") or "ascii"
for t in text:
if not isinstance(t, bytes):
t = t.encode(encoding, "replace")
t = t.decode(encoding)
try:
stream.write(t)
except OSError:
# suppress "broken pipe" errors.
# no known way to handle this on Python 3 however
# as the exception is "ignored" (noisily) in TextIOWrapper.
break
@contextmanager
def status(
status_msg: str, newline: bool = False, quiet: bool = False
) -> Iterator[None]:
msg(status_msg + " ...", newline, flush=True, quiet=quiet)
try:
yield
except:
if not quiet:
write_outstream(sys.stdout, " FAILED\n")
raise
else:
if not quiet:
write_outstream(sys.stdout, " done\n")
def err(message: str, quiet: bool = False) -> None:
log.error(message)
msg(f"FAILED: {message}", quiet=quiet)
sys.exit(-1)
def obfuscate_url_pw(input_url: str) -> str:
return url.make_url(input_url).render_as_string(hide_password=True)
def warn(msg: str, stacklevel: int = 2) -> None:
warnings.warn(msg, UserWarning, stacklevel=stacklevel)
def msg(
msg: str, newline: bool = True, flush: bool = False, quiet: bool = False
) -> None:
if quiet:
return
if TERMWIDTH is None:
write_outstream(sys.stdout, msg)
if newline:
write_outstream(sys.stdout, "\n")
else:
# left indent output lines
indent = " "
lines = textwrap.wrap(
msg,
TERMWIDTH,
initial_indent=indent,
subsequent_indent=indent,
)
if len(lines) > 1:
for line in lines[0:-1]:
write_outstream(sys.stdout, line, "\n")
write_outstream(sys.stdout, lines[-1], ("\n" if newline else ""))
if flush:
sys.stdout.flush()
def format_as_comma(value: Optional[Union[str, Iterable[str]]]) -> str:
if value is None:
return ""
elif isinstance(value, str):
return value
elif isinstance(value, Iterable):
return ", ".join(value)
else:
raise ValueError("Don't know how to comma-format %r" % value)
|
from __future__ import annotations
import atexit
from contextlib import ExitStack
import importlib
import importlib.machinery
import importlib.util
import os
import re
import tempfile
from types import ModuleType
from typing import Any
from typing import Optional
from mako import exceptions
from mako.template import Template
from . import compat
from .exc import CommandError
def template_to_file(
template_file: str, dest: str, output_encoding: str, **kw: Any
) -> None:
template = Template(filename=template_file)
try:
output = template.render_unicode(**kw).encode(output_encoding)
except:
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf:
ntf.write(
exceptions.text_error_template()
.render_unicode()
.encode(output_encoding)
)
fname = ntf.name
raise CommandError(
"Template rendering failed; see %s for a "
"template-oriented traceback." % fname
)
else:
with open(dest, "wb") as f:
f.write(output)
def coerce_resource_to_filename(fname: str) -> str:
"""Interpret a filename as either a filesystem location or as a package
resource.
    Names that are not absolute paths and contain a colon
are interpreted as resources and coerced to a file location.
"""
if not os.path.isabs(fname) and ":" in fname:
tokens = fname.split(":")
# from https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename # noqa E501
file_manager = ExitStack()
atexit.register(file_manager.close)
ref = compat.importlib_resources.files(tokens[0])
for tok in tokens[1:]:
ref = ref / tok
fname = file_manager.enter_context( # type: ignore[assignment]
compat.importlib_resources.as_file(ref)
)
return fname
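# Illustrative sketch, not part of Alembic's API: plain filesystem paths
# pass through unchanged, while a non-absolute "package:path" name is
# materialized as a real file; "alembic:templates" is an assumed example.
def _demo_coerce_resource() -> str:
    assert coerce_resource_to_filename("/tmp/alembic.ini") == "/tmp/alembic.ini"
    return coerce_resource_to_filename("alembic:templates")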
def pyc_file_from_path(path: str) -> Optional[str]:
"""Given a python source path, locate the .pyc."""
candidate = importlib.util.cache_from_source(path)
if os.path.exists(candidate):
return candidate
# even for pep3147, fall back to the old way of finding .pyc files,
# to support sourceless operation
filepath, ext = os.path.splitext(path)
for ext in importlib.machinery.BYTECODE_SUFFIXES:
if os.path.exists(filepath + ext):
return filepath + ext
else:
return None
def load_python_file(dir_: str, filename: str) -> ModuleType:
"""Load a file from the given path as a Python module."""
module_id = re.sub(r"\W", "_", filename)
path = os.path.join(dir_, filename)
_, ext = os.path.splitext(filename)
if ext == ".py":
if os.path.exists(path):
module = load_module_py(module_id, path)
else:
pyc_path = pyc_file_from_path(path)
if pyc_path is None:
raise ImportError("Can't find Python file %s" % path)
else:
module = load_module_py(module_id, pyc_path)
elif ext in (".pyc", ".pyo"):
module = load_module_py(module_id, path)
else:
        assert False, "unexpected script file extension: %r" % ext
return module
def load_module_py(module_id: str, path: str) -> ModuleType:
spec = importlib.util.spec_from_file_location(module_id, path)
assert spec
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return module
|
# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
# mypy: no-warn-return-any, allow-any-generics
from __future__ import annotations
import contextlib
import re
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Protocol
from typing import Set
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from sqlalchemy import __version__
from sqlalchemy import schema
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from sqlalchemy.schema import CheckConstraint
from sqlalchemy.schema import Column
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.sql import visitors
from sqlalchemy.sql.base import DialectKWArgs
from sqlalchemy.sql.elements import BindParameter
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.elements import UnaryExpression
from sqlalchemy.sql.visitors import traverse
from typing_extensions import TypeGuard
if True:
from sqlalchemy.sql.naming import _NONE_NAME as _NONE_NAME # type: ignore[attr-defined] # noqa: E501
if TYPE_CHECKING:
from sqlalchemy import ClauseElement
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import Table
from sqlalchemy.engine import Connection
from sqlalchemy.engine import Dialect
from sqlalchemy.engine import Transaction
from sqlalchemy.sql.base import ColumnCollection
from sqlalchemy.sql.compiler import SQLCompiler
from sqlalchemy.sql.elements import ColumnElement
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import SchemaItem
_CE = TypeVar("_CE", bound=Union["ColumnElement[Any]", "SchemaItem"])
class _CompilerProtocol(Protocol):
def __call__(self, element: Any, compiler: Any, **kw: Any) -> str: ...
def _safe_int(value: str) -> Union[int, str]:
try:
return int(value)
    except ValueError:
return value
_vers = tuple(
[_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)]
)
# https://docs.sqlalchemy.org/en/latest/changelog/changelog_14.html#change-0c6e0cc67dfe6fac5164720e57ef307d
sqla_14_18 = _vers >= (1, 4, 18)
sqla_14_26 = _vers >= (1, 4, 26)
sqla_2 = _vers >= (2,)
sqlalchemy_version = __version__
if TYPE_CHECKING:
def compiles(
element: Type[ClauseElement], *dialects: str
) -> Callable[[_CompilerProtocol], _CompilerProtocol]: ...
else:
from sqlalchemy.ext.compiler import compiles
identity_has_dialect_kwargs = issubclass(schema.Identity, DialectKWArgs)
def _get_identity_options_dict(
identity: Union[Identity, schema.Sequence, None],
dialect_kwargs: bool = False,
) -> Dict[str, Any]:
if identity is None:
return {}
elif identity_has_dialect_kwargs:
assert hasattr(identity, "_as_dict")
as_dict = identity._as_dict()
if dialect_kwargs:
assert isinstance(identity, DialectKWArgs)
as_dict.update(identity.dialect_kwargs)
else:
as_dict = {}
if isinstance(identity, schema.Identity):
# always=None means something different than always=False
as_dict["always"] = identity.always
if identity.on_null is not None:
as_dict["on_null"] = identity.on_null
# attributes common to Identity and Sequence
attrs = (
"start",
"increment",
"minvalue",
"maxvalue",
"nominvalue",
"nomaxvalue",
"cycle",
"cache",
"order",
)
as_dict.update(
{
key: getattr(identity, key, None)
for key in attrs
if getattr(identity, key, None) is not None
}
)
return as_dict
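# Illustrative sketch, not part of Alembic's API: Identity options are
# normalized to a plain dict so autogenerate can diff them across
# SQLAlchemy versions; the exact keys present vary by version.
def _demo_identity_options() -> Dict[str, Any]:
    return _get_identity_options_dict(schema.Identity(start=5, increment=7))
    # e.g. -> {'start': 5, 'increment': 7, ...}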
if sqla_2:
from sqlalchemy.sql.base import _NoneName
else:
from sqlalchemy.util import symbol as _NoneName # type: ignore[assignment]
_ConstraintName = Union[None, str, _NoneName]
_ConstraintNameDefined = Union[str, _NoneName]
def constraint_name_defined(
name: _ConstraintName,
) -> TypeGuard[_ConstraintNameDefined]:
return name is _NONE_NAME or isinstance(name, (str, _NoneName))
def constraint_name_string(name: _ConstraintName) -> TypeGuard[str]:
return isinstance(name, str)
def constraint_name_or_none(name: _ConstraintName) -> Optional[str]:
return name if constraint_name_string(name) else None
AUTOINCREMENT_DEFAULT = "auto"
@contextlib.contextmanager
def _ensure_scope_for_ddl(
connection: Optional[Connection],
) -> Iterator[None]:
try:
in_transaction = connection.in_transaction # type: ignore[union-attr]
    except AttributeError:
        # catch for MockConnection or a None connection
        in_transaction = None
# yield outside the catch
if in_transaction is None:
yield
else:
if not in_transaction():
assert connection is not None
with connection.begin():
yield
else:
yield
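# Illustrative sketch, not part of Alembic's API: DDL is guaranteed a
# transaction scope whether or not the caller already began one; ``conn``
# is an assumed live Connection.
def _demo_scoped_ddl(conn: Connection) -> None:
    with _ensure_scope_for_ddl(conn):
        conn.execute(sql.text("SELECT 1"))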
def _safe_begin_connection_transaction(
connection: Connection,
) -> Transaction:
transaction = connection.get_transaction()
if transaction:
return transaction
else:
return connection.begin()
def _safe_commit_connection_transaction(
connection: Connection,
) -> None:
transaction = connection.get_transaction()
if transaction:
transaction.commit()
def _safe_rollback_connection_transaction(
connection: Connection,
) -> None:
transaction = connection.get_transaction()
if transaction:
transaction.rollback()
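# Illustrative sketch, not part of Alembic's API: the _safe_* helpers reuse
# an already-open transaction and otherwise begin, commit, or roll back a
# new one, sparing callers the in_transaction() bookkeeping.
def _demo_safe_transaction(conn: Connection) -> None:
    _safe_begin_connection_transaction(conn)
    conn.execute(sql.text("SELECT 1"))
    _safe_commit_connection_transaction(conn)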
def _get_connection_in_transaction(connection: Optional[Connection]) -> bool:
try:
in_transaction = connection.in_transaction # type: ignore
except AttributeError:
# catch for MockConnection
return False
else:
return in_transaction()
def _idx_table_bound_expressions(idx: Index) -> Iterable[ColumnElement[Any]]:
return idx.expressions # type: ignore
def _copy(schema_item: _CE, **kw) -> _CE:
if hasattr(schema_item, "_copy"):
return schema_item._copy(**kw)
else:
return schema_item.copy(**kw) # type: ignore[union-attr]
def _connectable_has_table(
connectable: Connection, tablename: str, schemaname: Union[str, None]
) -> bool:
return connectable.dialect.has_table(connectable, tablename, schemaname)
def _exec_on_inspector(inspector, statement, **params):
with inspector._operation_context() as conn:
return conn.execute(statement, params)
def _nullability_might_be_unset(metadata_column):
from sqlalchemy.sql import schema
return metadata_column._user_defined_nullable is schema.NULL_UNSPECIFIED
def _server_default_is_computed(*server_default) -> bool:
return any(isinstance(sd, schema.Computed) for sd in server_default)
def _server_default_is_identity(*server_default) -> bool:
return any(isinstance(sd, schema.Identity) for sd in server_default)
def _table_for_constraint(constraint: Constraint) -> Table:
if isinstance(constraint, ForeignKeyConstraint):
table = constraint.parent
assert table is not None
return table # type: ignore[return-value]
else:
return constraint.table
def _columns_for_constraint(constraint):
if isinstance(constraint, ForeignKeyConstraint):
return [fk.parent for fk in constraint.elements]
elif isinstance(constraint, CheckConstraint):
return _find_columns(constraint.sqltext)
else:
return list(constraint.columns)
def _resolve_for_variant(type_, dialect):
if _type_has_variants(type_):
base_type, mapping = _get_variant_mapping(type_)
return mapping.get(dialect.name, base_type)
else:
return type_
if hasattr(sqltypes.TypeEngine, "_variant_mapping"): # 2.0
def _type_has_variants(type_):
return bool(type_._variant_mapping)
def _get_variant_mapping(type_):
return type_, type_._variant_mapping
else:
def _type_has_variants(type_):
return type(type_) is sqltypes.Variant
def _get_variant_mapping(type_):
return type_.impl, type_.mapping
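# Illustrative note (not part of the original module): _resolve_for_variant
# picks the dialect-specific implementation of a variant type, e.g. a type
# declared as
#     JSON().with_variant(postgresql.JSONB(), "postgresql")
# resolves to JSONB under a PostgreSQL dialect and to the base JSON elsewhere.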
def _fk_spec(constraint: ForeignKeyConstraint) -> Any:
if TYPE_CHECKING:
assert constraint.columns is not None
assert constraint.elements is not None
assert isinstance(constraint.parent, Table)
source_columns = [
constraint.columns[key].name for key in constraint.column_keys
]
source_table = constraint.parent.name
source_schema = constraint.parent.schema
target_schema = constraint.elements[0].column.table.schema
target_table = constraint.elements[0].column.table.name
target_columns = [element.column.name for element in constraint.elements]
ondelete = constraint.ondelete
onupdate = constraint.onupdate
deferrable = constraint.deferrable
initially = constraint.initially
return (
source_schema,
source_table,
source_columns,
target_schema,
target_table,
target_columns,
onupdate,
ondelete,
deferrable,
initially,
)
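# Illustrative sketch (not part of the original module): callers unpack the
# 10-tuple returned above positionally; `fk_constraint` is a placeholder for a
# ForeignKeyConstraint.
#
#     (
#         source_schema, source_table, source_columns,
#         target_schema, target_table, target_columns,
#         onupdate, ondelete, deferrable, initially,
#     ) = _fk_spec(fk_constraint)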
def _fk_is_self_referential(constraint: ForeignKeyConstraint) -> bool:
spec = constraint.elements[0]._get_colspec()
tokens = spec.split(".")
tokens.pop(-1) # colname
tablekey = ".".join(tokens)
assert constraint.parent is not None
return tablekey == constraint.parent.key
def _is_type_bound(constraint: Constraint) -> bool:
    # deals with SQLAlchemy #3260: don't copy CHECK constraints
    # that will be generated by the type itself.
return constraint._type_bound
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols: Set[ColumnElement[Any]] = set()
traverse(clause, {}, {"column": cols.add})
return cols
def _remove_column_from_collection(
collection: ColumnCollection, column: Union[Column[Any], ColumnClause[Any]]
) -> None:
"""remove a column from a ColumnCollection."""
# workaround for older SQLAlchemy, remove the
# same object that's present
assert column.key is not None
to_remove = collection[column.key]
# SQLAlchemy 2.0 will use more ReadOnlyColumnCollection
# (renamed from ImmutableColumnCollection)
if hasattr(collection, "_immutable") or hasattr(collection, "_readonly"):
collection._parent.remove(to_remove)
else:
collection.remove(to_remove)
def _textual_index_column(
table: Table, text_: Union[str, TextClause, ColumnElement[Any]]
) -> Union[ColumnElement[Any], Column[Any]]:
"""a workaround for the Index construct's severe lack of flexibility"""
if isinstance(text_, str):
c = Column(text_, sqltypes.NULLTYPE)
table.append_column(c)
return c
elif isinstance(text_, TextClause):
return _textual_index_element(table, text_)
elif isinstance(text_, _textual_index_element):
return _textual_index_column(table, text_.text)
elif isinstance(text_, sql.ColumnElement):
return _copy_expression(text_, table)
else:
raise ValueError("String or text() construct expected")
def _copy_expression(expression: _CE, target_table: Table) -> _CE:
def replace(col):
if (
isinstance(col, Column)
and col.table is not None
and col.table is not target_table
):
if col.name in target_table.c:
return target_table.c[col.name]
else:
c = _copy(col)
target_table.append_column(c)
return c
else:
return None
return visitors.replacement_traverse( # type: ignore[call-overload]
expression, {}, replace
)
class _textual_index_element(sql.ColumnElement):
"""Wrap around a sqlalchemy text() construct in such a way that
we appear like a column-oriented SQL expression to an Index
construct.
The issue here is that currently the Postgresql dialect, the biggest
recipient of functional indexes, keys all the index expressions to
the corresponding column expressions when rendering CREATE INDEX,
so the Index we create here needs to have a .columns collection that
is the same length as the .expressions collection. Ultimately
SQLAlchemy should support text() expressions in indexes.
See SQLAlchemy issue 3174.
"""
__visit_name__ = "_textual_idx_element"
def __init__(self, table: Table, text: TextClause) -> None:
self.table = table
self.text = text
self.key = text.text
self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE)
table.append_column(self.fake_column)
def get_children(self, **kw):
return [self.fake_column]
@compiles(_textual_index_element)
def _render_textual_index_column(
element: _textual_index_element, compiler: SQLCompiler, **kw
) -> str:
return compiler.process(element.text, **kw)
class _literal_bindparam(BindParameter):
pass
@compiles(_literal_bindparam)
def _render_literal_bindparam(
element: _literal_bindparam, compiler: SQLCompiler, **kw
) -> str:
return compiler.render_literal_bindparam(element, **kw)
def _get_constraint_final_name(
constraint: Union[Index, Constraint], dialect: Optional[Dialect]
) -> Optional[str]:
if constraint.name is None:
return None
assert dialect is not None
# for SQLAlchemy 1.4 we would like to have the option to expand
# the use of "deferred" names for constraints as well as to have
# some flexibility with "None" name and similar; make use of new
# SQLAlchemy API to return what would be the final compiled form of
# the name for this dialect.
return dialect.identifier_preparer.format_constraint(
constraint, _alembic_quote=False
)
def _constraint_is_named(
constraint: Union[Constraint, Index], dialect: Optional[Dialect]
) -> bool:
if constraint.name is None:
return False
assert dialect is not None
name = dialect.identifier_preparer.format_constraint(
constraint, _alembic_quote=False
)
return name is not None
def is_expression_index(index: Index) -> bool:
for expr in index.expressions:
if is_expression(expr):
return True
return False
def is_expression(expr: Any) -> bool:
while isinstance(expr, UnaryExpression):
expr = expr.element
if not isinstance(expr, ColumnClause) or expr.is_literal:
return True
return False
|
from .editor import open_in_editor as open_in_editor
from .exc import AutogenerateDiffsDetected as AutogenerateDiffsDetected
from .exc import CommandError as CommandError
from .langhelpers import _with_legacy_names as _with_legacy_names
from .langhelpers import asbool as asbool
from .langhelpers import dedupe_tuple as dedupe_tuple
from .langhelpers import Dispatcher as Dispatcher
from .langhelpers import EMPTY_DICT as EMPTY_DICT
from .langhelpers import immutabledict as immutabledict
from .langhelpers import memoized_property as memoized_property
from .langhelpers import ModuleClsProxy as ModuleClsProxy
from .langhelpers import not_none as not_none
from .langhelpers import rev_id as rev_id
from .langhelpers import to_list as to_list
from .langhelpers import to_tuple as to_tuple
from .langhelpers import unique_list as unique_list
from .messaging import err as err
from .messaging import format_as_comma as format_as_comma
from .messaging import msg as msg
from .messaging import obfuscate_url_pw as obfuscate_url_pw
from .messaging import status as status
from .messaging import warn as warn
from .messaging import write_outstream as write_outstream
from .pyfiles import coerce_resource_to_filename as coerce_resource_to_filename
from .pyfiles import load_python_file as load_python_file
from .pyfiles import pyc_file_from_path as pyc_file_from_path
from .pyfiles import template_to_file as template_to_file
from .sqla_compat import sqla_2 as sqla_2
|
import math
import sys
from datetime import date, datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple
if sys.version_info < (3, 9):
from typing_extensions import Annotated
else:
from typing import Annotated
import annotated_types as at
class Case(NamedTuple):
"""
A test case for `annotated_types`.
"""
annotation: Any
valid_cases: Iterable[Any]
invalid_cases: Iterable[Any]
def cases() -> Iterable[Case]:
# Gt, Ge, Lt, Le
yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1))
yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1))
yield Case(
Annotated[datetime, at.Gt(datetime(2000, 1, 1))],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(1999, 12, 31)],
)
yield Case(
Annotated[datetime, at.Gt(date(2000, 1, 1))],
[date(2000, 1, 2), date(2000, 1, 3)],
[date(2000, 1, 1), date(1999, 12, 31)],
)
yield Case(
Annotated[datetime, at.Gt(Decimal('1.123'))],
[Decimal('1.1231'), Decimal('123')],
[Decimal('1.123'), Decimal('0')],
)
yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1))
yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1))
yield Case(
Annotated[datetime, at.Ge(datetime(2000, 1, 1))],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(1998, 1, 1), datetime(1999, 12, 31)],
)
yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4))
yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9))
yield Case(
Annotated[datetime, at.Lt(datetime(2000, 1, 1))],
[datetime(1999, 12, 31), datetime(1999, 12, 31)],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
)
yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000))
yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9))
yield Case(
Annotated[datetime, at.Le(datetime(2000, 1, 1))],
[datetime(2000, 1, 1), datetime(1999, 12, 31)],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
)
# Interval
yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1))
yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1))
yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1))
yield Case(
Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))],
[datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 4)],
)
yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4))
yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1))
# lengths
yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10))
yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10))
yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10))
yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234'))
yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}])
yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4}))
yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4)))
# Timezone
yield Case(
Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)]
)
yield Case(
Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)]
)
yield Case(
Annotated[datetime, at.Timezone(timezone.utc)],
[datetime(2000, 1, 1, tzinfo=timezone.utc)],
[datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
)
yield Case(
Annotated[datetime, at.Timezone('Europe/London')],
[datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))],
[datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
)
# Quantity
yield Case(Annotated[float, at.Unit(unit='m')], (5, 4.2), ('5m', '4.2m'))
# predicate types
yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom'])
yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC'])
yield Case(at.IsDigit[str], ['123'], ['', 'ab', 'a1b2'])
yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀'])
yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5])
yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf])
yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23])
yield Case(at.IsNan[float], [math.nan], [1.23, math.inf])
yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan])
yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23])
yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf])
# check stacked predicates
yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan])
# doc
yield Case(Annotated[int, at.doc("A number")], [1, 2], [])
# custom GroupedMetadata
class MyCustomGroupedMetadata(at.GroupedMetadata):
def __iter__(self) -> Iterator[at.Predicate]:
yield at.Predicate(lambda x: float(x).is_integer())
yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5])
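# Illustrative sketch (not part of the original file): one way a test suite
# might drive the cases above against a hypothetical
# ``validator(annotation, value) -> bool`` callable supplied by the caller.
def _run_cases(validator) -> None:
    for case in cases():
        for value in case.valid_cases:
            assert validator(case.annotation, value), (case.annotation, value)
        for value in case.invalid_cases:
            assert not validator(case.annotation, value), (case.annotation, value)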
|
import math
import sys
import types
from dataclasses import dataclass
from datetime import tzinfo
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union
if sys.version_info < (3, 8):
from typing_extensions import Protocol, runtime_checkable
else:
from typing import Protocol, runtime_checkable
if sys.version_info < (3, 9):
from typing_extensions import Annotated, Literal
else:
from typing import Annotated, Literal
if sys.version_info < (3, 10):
EllipsisType = type(Ellipsis)
KW_ONLY = {}
SLOTS = {}
else:
from types import EllipsisType
KW_ONLY = {"kw_only": True}
SLOTS = {"slots": True}
__all__ = (
'BaseMetadata',
'GroupedMetadata',
'Gt',
'Ge',
'Lt',
'Le',
'Interval',
'MultipleOf',
'MinLen',
'MaxLen',
'Len',
'Timezone',
'Predicate',
'LowerCase',
'UpperCase',
'IsDigits',
'IsFinite',
'IsNotFinite',
'IsNan',
'IsNotNan',
'IsInfinite',
'IsNotInfinite',
'doc',
'DocInfo',
'__version__',
)
__version__ = '0.7.0'
T = TypeVar('T')
# arguments that start with __ are considered
# positional only
# see https://peps.python.org/pep-0484/#positional-only-arguments
class SupportsGt(Protocol):
def __gt__(self: T, __other: T) -> bool:
...
class SupportsGe(Protocol):
def __ge__(self: T, __other: T) -> bool:
...
class SupportsLt(Protocol):
def __lt__(self: T, __other: T) -> bool:
...
class SupportsLe(Protocol):
def __le__(self: T, __other: T) -> bool:
...
class SupportsMod(Protocol):
def __mod__(self: T, __other: T) -> T:
...
class SupportsDiv(Protocol):
def __div__(self: T, __other: T) -> T:
...
class BaseMetadata:
"""Base class for all metadata.
This exists mainly so that implementers
can do `isinstance(..., BaseMetadata)` while traversing field annotations.
"""
__slots__ = ()
@dataclass(frozen=True, **SLOTS)
class Gt(BaseMetadata):
"""Gt(gt=x) implies that the value must be greater than x.
It can be used with any type that supports the ``>`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
gt: SupportsGt
@dataclass(frozen=True, **SLOTS)
class Ge(BaseMetadata):
"""Ge(ge=x) implies that the value must be greater than or equal to x.
It can be used with any type that supports the ``>=`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
ge: SupportsGe
@dataclass(frozen=True, **SLOTS)
class Lt(BaseMetadata):
"""Lt(lt=x) implies that the value must be less than x.
It can be used with any type that supports the ``<`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
lt: SupportsLt
@dataclass(frozen=True, **SLOTS)
class Le(BaseMetadata):
"""Le(le=x) implies that the value must be less than or equal to x.
It can be used with any type that supports the ``<=`` operator,
including numbers, dates and times, strings, sets, and so on.
"""
le: SupportsLe
@runtime_checkable
class GroupedMetadata(Protocol):
"""A grouping of multiple objects, like typing.Unpack.
`GroupedMetadata` on its own is not metadata and has no meaning.
All of the constraints and metadata should be fully expressable
in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
Concrete implementations should override `GroupedMetadata.__iter__()`
to add their own metadata.
For example:
>>> @dataclass
>>> class Field(GroupedMetadata):
>>> gt: float | None = None
>>> description: str | None = None
...
>>> def __iter__(self) -> Iterable[object]:
>>> if self.gt is not None:
>>> yield Gt(self.gt)
>>> if self.description is not None:
>>> yield Description(self.gt)
Also see the implementation of `Interval` below for an example.
Parsers should recognize this and unpack it so that it can be used
both with and without unpacking:
- `Annotated[int, Field(...)]` (parser must unpack Field)
- `Annotated[int, *Field(...)]` (PEP-646)
""" # noqa: trailing-whitespace
@property
def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
return True
def __iter__(self) -> Iterator[object]:
...
if not TYPE_CHECKING:
__slots__ = () # allow subclasses to use slots
def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
# Basic ABC like functionality without the complexity of an ABC
super().__init_subclass__(*args, **kwargs)
if cls.__iter__ is GroupedMetadata.__iter__:
raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
def __iter__(self) -> Iterator[object]: # noqa: F811
raise NotImplementedError # more helpful than "None has no attribute..." type errors
@dataclass(frozen=True, **KW_ONLY, **SLOTS)
class Interval(GroupedMetadata):
"""Interval can express inclusive or exclusive bounds with a single object.
It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
are interpreted the same way as the single-bound constraints.
"""
gt: Union[SupportsGt, None] = None
ge: Union[SupportsGe, None] = None
lt: Union[SupportsLt, None] = None
le: Union[SupportsLe, None] = None
def __iter__(self) -> Iterator[BaseMetadata]:
"""Unpack an Interval into zero or more single-bounds."""
if self.gt is not None:
yield Gt(self.gt)
if self.ge is not None:
yield Ge(self.ge)
if self.lt is not None:
yield Lt(self.lt)
if self.le is not None:
yield Le(self.le)
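# Illustrative note (not part of the original module): because Interval is a
# GroupedMetadata that unpacks into its single-bound constituents, these two
# annotations are equivalent:
#     Annotated[int, Interval(gt=0, le=10)]
#     Annotated[int, Gt(0), Le(10)]
# e.g. list(Interval(gt=0, le=10)) == [Gt(0), Le(10)]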
@dataclass(frozen=True, **SLOTS)
class MultipleOf(BaseMetadata):
"""MultipleOf(multiple_of=x) might be interpreted in two ways:
1. Python semantics, implying ``value % multiple_of == 0``, or
2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
We encourage users to be aware of these two common interpretations,
and libraries to carefully document which they implement.
"""
multiple_of: Union[SupportsDiv, SupportsMod]
@dataclass(frozen=True, **SLOTS)
class MinLen(BaseMetadata):
"""
MinLen() implies minimum inclusive length,
e.g. ``len(value) >= min_length``.
"""
min_length: Annotated[int, Ge(0)]
@dataclass(frozen=True, **SLOTS)
class MaxLen(BaseMetadata):
"""
MaxLen() implies maximum inclusive length,
e.g. ``len(value) <= max_length``.
"""
max_length: Annotated[int, Ge(0)]
@dataclass(frozen=True, **SLOTS)
class Len(GroupedMetadata):
"""
Len() implies that ``min_length <= len(value) <= max_length``.
Upper bound may be omitted or ``None`` to indicate no upper length bound.
"""
min_length: Annotated[int, Ge(0)] = 0
max_length: Optional[Annotated[int, Ge(0)]] = None
def __iter__(self) -> Iterator[BaseMetadata]:
"""Unpack a Len into zone or more single-bounds."""
if self.min_length > 0:
yield MinLen(self.min_length)
if self.max_length is not None:
yield MaxLen(self.max_length)
@dataclass(frozen=True, **SLOTS)
class Timezone(BaseMetadata):
"""Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).
``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
tz-aware but any timezone is allowed.
You may also pass a specific timezone string or tzinfo object such as
``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
you only allow a specific timezone, though we note that this is often
a symptom of poor design.
"""
tz: Union[str, tzinfo, EllipsisType, None]
@dataclass(frozen=True, **SLOTS)
class Unit(BaseMetadata):
"""Indicates that the value is a physical quantity with the specified unit.
It is intended for usage with numeric types, where the value represents the
magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]``
or ``speed: Annotated[float, Unit('m/s')]``.
Interpretation of the unit string is left to the discretion of the consumer.
It is suggested to follow conventions established by python libraries that work
with physical quantities, such as
- ``pint`` : <https://pint.readthedocs.io/en/stable/>
- ``astropy.units``: <https://docs.astropy.org/en/stable/units/>
For indicating a quantity with a certain dimensionality but without a specific unit
it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`.
Note, however, ``annotated_types`` itself makes no use of the unit string.
"""
unit: str
@dataclass(frozen=True, **SLOTS)
class Predicate(BaseMetadata):
"""``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
Users should prefer statically inspectable metadata, but if you need the full
power and flexibility of arbitrary runtime predicates... here it is.
We provide a few predefined predicates for common string constraints:
``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which
can be given special handling, and avoid indirection like ``lambda s: s.lower()``.
Some libraries might have special logic to handle certain predicates, e.g. by
checking for `str.isdigit` and using its presence to both call custom logic to
enforce digit-only strings, and customise some generated external schema.
We do not specify what behaviour should be expected for predicates that raise
an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
skip invalid constraints, or statically raise an error; or it might try calling it
and then propagate or discard the resulting exception.
"""
func: Callable[[Any], bool]
def __repr__(self) -> str:
if getattr(self.func, "__name__", "<lambda>") == "<lambda>":
return f"{self.__class__.__name__}({self.func!r})"
if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and (
namespace := getattr(self.func.__self__, "__name__", None)
):
return f"{self.__class__.__name__}({namespace}.{self.func.__name__})"
if isinstance(self.func, type(str.isascii)): # method descriptor
return f"{self.__class__.__name__}({self.func.__qualname__})"
return f"{self.__class__.__name__}({self.func.__name__})"
@dataclass
class Not:
func: Callable[[Any], bool]
def __call__(self, __v: Any) -> bool:
return not self.func(__v)
_StrType = TypeVar("_StrType", bound=str)
LowerCase = Annotated[_StrType, Predicate(str.islower)]
"""
Return True if the string is a lowercase string, False otherwise.
A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
""" # noqa: E501
UpperCase = Annotated[_StrType, Predicate(str.isupper)]
"""
Return True if the string is an uppercase string, False otherwise.
A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
""" # noqa: E501
IsDigit = Annotated[_StrType, Predicate(str.isdigit)]
IsDigits = IsDigit # type: ignore # plural for backwards compatibility, see #63
"""
Return True if the string is a digit string, False otherwise.
A string is a digit string if all characters in the string are digits and there is at least one character in the string.
""" # noqa: E501
IsAscii = Annotated[_StrType, Predicate(str.isascii)]
"""
Return True if all characters in the string are ASCII, False otherwise.
ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
"""
_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
"""Return True if x is one of infinity or NaN, and False otherwise"""
IsNan = Annotated[_NumericType, Predicate(math.isnan)]
"""Return True if x is a NaN (not a number), and False otherwise."""
IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
"""Return True if x is anything but NaN (not a number), and False otherwise."""
IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
"""Return True if x is a positive or negative infinity, and False otherwise."""
IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
"""Return True if x is neither a positive or negative infinity, and False otherwise."""
try:
from typing_extensions import DocInfo, doc # type: ignore [attr-defined]
except ImportError:
@dataclass(frozen=True, **SLOTS)
class DocInfo: # type: ignore [no-redef]
""" "
The return value of doc(), mainly to be used by tools that want to extract the
Annotated documentation at runtime.
"""
documentation: str
"""The documentation string passed to doc()."""
def doc(
documentation: str,
) -> DocInfo:
"""
Add documentation to a type annotation inside of Annotated.
For example:
>>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
"""
return DocInfo(documentation)
|
from __future__ import annotations
import sys
from collections.abc import Awaitable, Callable, Generator
from concurrent.futures import Future
from contextlib import (
AbstractAsyncContextManager,
AbstractContextManager,
contextmanager,
)
from dataclasses import dataclass, field
from inspect import isawaitable
from threading import Lock, Thread, get_ident
from types import TracebackType
from typing import (
Any,
Generic,
TypeVar,
cast,
overload,
)
from ._core import _eventloop
from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
from ._core._synchronization import Event
from ._core._tasks import CancelScope, create_task_group
from .abc import AsyncBackend
from .abc._tasks import TaskStatus
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
T_Retval = TypeVar("T_Retval")
T_co = TypeVar("T_co", covariant=True)
PosArgsT = TypeVarTuple("PosArgsT")
def run(
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
"""
Call a coroutine function from a worker thread.
:param func: a coroutine function
:param args: positional arguments for the callable
:return: the return value of the coroutine function
"""
try:
async_backend = threadlocals.current_async_backend
token = threadlocals.current_token
except AttributeError:
raise RuntimeError(
"This function can only be run from an AnyIO worker thread"
) from None
return async_backend.run_async_from_thread(func, args, token=token)
def run_sync(
func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
"""
Call a function in the event loop thread from a worker thread.
:param func: a callable
:param args: positional arguments for the callable
:return: the return value of the callable
"""
try:
async_backend = threadlocals.current_async_backend
token = threadlocals.current_token
except AttributeError:
raise RuntimeError(
"This function can only be run from an AnyIO worker thread"
) from None
return async_backend.run_sync_from_thread(func, args, token=token)
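# Illustrative sketch (not part of the original module): hopping from a worker
# thread back into the event loop. Assumes the thread was started via
# anyio.to_thread.run_sync(), which sets up the required threadlocals.
#
#     import anyio
#     from anyio import from_thread, to_thread
#
#     def sync_work() -> None:
#         from_thread.run(anyio.sleep, 0.01)    # await a coroutine in the loop
#         from_thread.run_sync(print, "hello")  # call a sync function in the loop
#
#     async def main() -> None:
#         await to_thread.run_sync(sync_work)
#
#     anyio.run(main)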
class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
_enter_future: Future[T_co]
_exit_future: Future[bool | None]
_exit_event: Event
_exit_exc_info: tuple[
type[BaseException] | None, BaseException | None, TracebackType | None
] = (None, None, None)
def __init__(
self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
):
self._async_cm = async_cm
self._portal = portal
async def run_async_cm(self) -> bool | None:
try:
self._exit_event = Event()
value = await self._async_cm.__aenter__()
except BaseException as exc:
self._enter_future.set_exception(exc)
raise
else:
self._enter_future.set_result(value)
try:
# Wait for the sync context manager to exit.
# This next statement can raise `get_cancelled_exc_class()` if
# something went wrong in a task group in this async context
# manager.
await self._exit_event.wait()
finally:
# In case of cancellation, it could be that we end up here before
# `_BlockingAsyncContextManager.__exit__` is called, and an
# `_exit_exc_info` has been set.
result = await self._async_cm.__aexit__(*self._exit_exc_info)
return result
def __enter__(self) -> T_co:
self._enter_future = Future()
self._exit_future = self._portal.start_task_soon(self.run_async_cm)
return self._enter_future.result()
def __exit__(
self,
__exc_type: type[BaseException] | None,
__exc_value: BaseException | None,
__traceback: TracebackType | None,
) -> bool | None:
self._exit_exc_info = __exc_type, __exc_value, __traceback
self._portal.call(self._exit_event.set)
return self._exit_future.result()
class _BlockingPortalTaskStatus(TaskStatus):
def __init__(self, future: Future):
self._future = future
def started(self, value: object = None) -> None:
self._future.set_result(value)
class BlockingPortal:
"""An object that lets external threads run code in an asynchronous event loop."""
def __new__(cls) -> BlockingPortal:
return get_async_backend().create_blocking_portal()
def __init__(self) -> None:
self._event_loop_thread_id: int | None = get_ident()
self._stop_event = Event()
self._task_group = create_task_group()
self._cancelled_exc_class = get_cancelled_exc_class()
async def __aenter__(self) -> BlockingPortal:
await self._task_group.__aenter__()
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
await self.stop()
return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
def _check_running(self) -> None:
if self._event_loop_thread_id is None:
raise RuntimeError("This portal is not running")
if self._event_loop_thread_id == get_ident():
raise RuntimeError(
"This method cannot be called from the event loop thread"
)
async def sleep_until_stopped(self) -> None:
"""Sleep until :meth:`stop` is called."""
await self._stop_event.wait()
async def stop(self, cancel_remaining: bool = False) -> None:
"""
Signal the portal to shut down.
This marks the portal as no longer accepting new calls and exits from
:meth:`sleep_until_stopped`.
:param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
to let them finish before returning
"""
self._event_loop_thread_id = None
self._stop_event.set()
if cancel_remaining:
self._task_group.cancel_scope.cancel()
async def _call_func(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
future: Future[T_Retval],
) -> None:
def callback(f: Future[T_Retval]) -> None:
if f.cancelled() and self._event_loop_thread_id not in (
None,
get_ident(),
):
self.call(scope.cancel)
try:
retval_or_awaitable = func(*args, **kwargs)
if isawaitable(retval_or_awaitable):
with CancelScope() as scope:
if future.cancelled():
scope.cancel()
else:
future.add_done_callback(callback)
retval = await retval_or_awaitable
else:
retval = retval_or_awaitable
except self._cancelled_exc_class:
future.cancel()
future.set_running_or_notify_cancel()
except BaseException as exc:
if not future.cancelled():
future.set_exception(exc)
# Let base exceptions fall through
if not isinstance(exc, Exception):
raise
else:
if not future.cancelled():
future.set_result(retval)
finally:
scope = None # type: ignore[assignment]
def _spawn_task_from_thread(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
name: object,
future: Future[T_Retval],
) -> None:
"""
Spawn a new task using the given callable.
Implementers must ensure that the future is resolved when the task finishes.
:param func: a callable
:param args: positional arguments to be passed to the callable
:param kwargs: keyword arguments to be passed to the callable
:param name: name of the task (will be coerced to a string if not ``None``)
:param future: a future that will resolve to the return value of the callable,
or the exception raised during its execution
"""
raise NotImplementedError
@overload
def call(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
*args: Unpack[PosArgsT],
) -> T_Retval: ...
@overload
def call(
self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval: ...
def call(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
*args: Unpack[PosArgsT],
) -> T_Retval:
"""
Call the given function in the event loop thread.
If the callable returns a coroutine object, it is awaited on.
:param func: any callable
:raises RuntimeError: if the portal is not running or if this method is called
from within the event loop thread
"""
return cast(T_Retval, self.start_task_soon(func, *args).result())
@overload
def start_task_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
*args: Unpack[PosArgsT],
name: object = None,
) -> Future[T_Retval]: ...
@overload
def start_task_soon(
self,
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
name: object = None,
) -> Future[T_Retval]: ...
def start_task_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
*args: Unpack[PosArgsT],
name: object = None,
) -> Future[T_Retval]:
"""
Start a task in the portal's task group.
The task will be run inside a cancel scope which can be cancelled by cancelling
the returned future.
:param func: the target function
:param args: positional arguments passed to ``func``
:param name: name of the task (will be coerced to a string if not ``None``)
:return: a future that resolves with the return value of the callable if the
task completes successfully, or with the exception raised in the task
:raises RuntimeError: if the portal is not running or if this method is called
from within the event loop thread
:rtype: concurrent.futures.Future[T_Retval]
.. versionadded:: 3.0
"""
self._check_running()
f: Future[T_Retval] = Future()
self._spawn_task_from_thread(func, args, {}, name, f)
return f
def start_task(
self,
func: Callable[..., Awaitable[T_Retval]],
*args: object,
name: object = None,
) -> tuple[Future[T_Retval], Any]:
"""
Start a task in the portal's task group and wait until it signals for readiness.
This method works the same way as :meth:`.abc.TaskGroup.start`.
:param func: the target function
:param args: positional arguments passed to ``func``
:param name: name of the task (will be coerced to a string if not ``None``)
:return: a tuple of (future, task_status_value) where the ``task_status_value``
is the value passed to ``task_status.started()`` from within the target
function
:rtype: tuple[concurrent.futures.Future[T_Retval], Any]
.. versionadded:: 3.0
"""
def task_done(future: Future[T_Retval]) -> None:
if not task_status_future.done():
if future.cancelled():
task_status_future.cancel()
elif future.exception():
task_status_future.set_exception(future.exception())
else:
exc = RuntimeError(
"Task exited without calling task_status.started()"
)
task_status_future.set_exception(exc)
self._check_running()
task_status_future: Future = Future()
task_status = _BlockingPortalTaskStatus(task_status_future)
f: Future = Future()
f.add_done_callback(task_done)
self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
return f, task_status_future.result()
def wrap_async_context_manager(
self, cm: AbstractAsyncContextManager[T_co]
) -> AbstractContextManager[T_co]:
"""
Wrap an async context manager as a synchronous context manager via this portal.
Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
in the middle until the synchronous context manager exits.
:param cm: an asynchronous context manager
:return: a synchronous context manager
.. versionadded:: 2.1
"""
return _BlockingAsyncContextManager(cm, self)
@dataclass
class BlockingPortalProvider:
"""
A manager for a blocking portal. Used as a context manager. The first thread to
enter this context manager causes a blocking portal to be started with the specific
parameters, and the last thread to exit causes the portal to be shut down. Thus,
there will be exactly one blocking portal running in this context as long as at
least one thread has entered this context manager.
The parameters are the same as for :func:`~anyio.run`.
:param backend: name of the backend
:param backend_options: backend options
.. versionadded:: 4.4
"""
backend: str = "asyncio"
backend_options: dict[str, Any] | None = None
_lock: Lock = field(init=False, default_factory=Lock)
_leases: int = field(init=False, default=0)
_portal: BlockingPortal = field(init=False)
_portal_cm: AbstractContextManager[BlockingPortal] | None = field(
init=False, default=None
)
def __enter__(self) -> BlockingPortal:
with self._lock:
if self._portal_cm is None:
self._portal_cm = start_blocking_portal(
self.backend, self.backend_options
)
self._portal = self._portal_cm.__enter__()
self._leases += 1
return self._portal
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
portal_cm: AbstractContextManager[BlockingPortal] | None = None
with self._lock:
assert self._portal_cm
assert self._leases > 0
self._leases -= 1
if not self._leases:
portal_cm = self._portal_cm
self._portal_cm = None
del self._portal
if portal_cm:
portal_cm.__exit__(None, None, None)
@contextmanager
def start_blocking_portal(
backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
"""
Start a new event loop in a new thread and run a blocking portal in its main task.
The parameters are the same as for :func:`~anyio.run`.
:param backend: name of the backend
:param backend_options: backend options
:return: a context manager that yields a blocking portal
.. versionchanged:: 3.0
Usage as a context manager is now required.
"""
async def run_portal() -> None:
async with BlockingPortal() as portal_:
future.set_result(portal_)
await portal_.sleep_until_stopped()
def run_blocking_portal() -> None:
if future.set_running_or_notify_cancel():
try:
_eventloop.run(
run_portal, backend=backend, backend_options=backend_options
)
except BaseException as exc:
if not future.done():
future.set_exception(exc)
future: Future[BlockingPortal] = Future()
thread = Thread(target=run_blocking_portal, daemon=True)
thread.start()
try:
cancel_remaining_tasks = False
portal = future.result()
try:
yield portal
except BaseException:
cancel_remaining_tasks = True
raise
finally:
try:
portal.call(portal.stop, cancel_remaining_tasks)
except RuntimeError:
pass
finally:
thread.join()
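# Illustrative sketch (not part of the original module): driving async code
# from plain synchronous code through a portal.
#
#     import anyio
#
#     with start_blocking_portal() as portal:
#         portal.call(anyio.sleep, 0.1)              # run a coroutine and wait
#         future = portal.start_task_soon(anyio.sleep, 10)
#         future.cancel()                            # cancels the task's scope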
def check_cancelled() -> None:
"""
    Check if the cancel scope of the host task running the current worker thread has
    been cancelled.
If the host task's current cancel scope has indeed been cancelled, the
backend-specific cancellation exception will be raised.
:raises RuntimeError: if the current thread was not spawned by
:func:`.to_thread.run_sync`
"""
try:
async_backend: AsyncBackend = threadlocals.current_async_backend
except AttributeError:
raise RuntimeError(
"This function can only be run from an AnyIO worker thread"
) from None
async_backend.check_cancelled()
|
from __future__ import annotations
import enum
from dataclasses import dataclass
from typing import Any, Generic, Literal, TypeVar, overload
from weakref import WeakKeyDictionary
from ._core._eventloop import get_async_backend
T = TypeVar("T")
D = TypeVar("D")
async def checkpoint() -> None:
"""
Check for cancellation and allow the scheduler to switch to another task.
Equivalent to (but more efficient than)::
await checkpoint_if_cancelled()
await cancel_shielded_checkpoint()
.. versionadded:: 3.0
"""
await get_async_backend().checkpoint()
async def checkpoint_if_cancelled() -> None:
"""
Enter a checkpoint if the enclosing cancel scope has been cancelled.
This does not allow the scheduler to switch to a different task.
.. versionadded:: 3.0
"""
await get_async_backend().checkpoint_if_cancelled()
async def cancel_shielded_checkpoint() -> None:
"""
Allow the scheduler to switch to another task but without checking for cancellation.
Equivalent to (but potentially more efficient than)::
with CancelScope(shield=True):
await checkpoint()
.. versionadded:: 3.0
"""
await get_async_backend().cancel_shielded_checkpoint()
def current_token() -> object:
"""
Return a backend specific token object that can be used to get back to the event
loop.
"""
return get_async_backend().current_token()
_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
_token_wrappers: dict[Any, _TokenWrapper] = {}
@dataclass(frozen=True)
class _TokenWrapper:
__slots__ = "_token", "__weakref__"
_token: object
class _NoValueSet(enum.Enum):
NO_VALUE_SET = enum.auto()
class RunvarToken(Generic[T]):
__slots__ = "_var", "_value", "_redeemed"
def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
self._var = var
self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
self._redeemed = False
class RunVar(Generic[T]):
"""
Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
"""
__slots__ = "_name", "_default"
NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET
_token_wrappers: set[_TokenWrapper] = set()
def __init__(
self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
):
self._name = name
self._default = default
@property
def _current_vars(self) -> dict[str, T]:
token = current_token()
try:
return _run_vars[token]
except KeyError:
run_vars = _run_vars[token] = {}
return run_vars
@overload
def get(self, default: D) -> T | D: ...
@overload
def get(self) -> T: ...
def get(
self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
) -> T | D:
try:
return self._current_vars[self._name]
except KeyError:
if default is not RunVar.NO_VALUE_SET:
return default
elif self._default is not RunVar.NO_VALUE_SET:
return self._default
raise LookupError(
f'Run variable "{self._name}" has no value and no default set'
)
def set(self, value: T) -> RunvarToken[T]:
current_vars = self._current_vars
token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
current_vars[self._name] = value
return token
def reset(self, token: RunvarToken[T]) -> None:
if token._var is not self:
raise ValueError("This token does not belong to this RunVar")
if token._redeemed:
raise ValueError("This token has already been used")
if token._value is _NoValueSet.NO_VALUE_SET:
try:
del self._current_vars[self._name]
except KeyError:
pass
else:
self._current_vars[self._name] = token._value
token._redeemed = True
def __repr__(self) -> str:
return f"<RunVar name={self._name!r}>"
|
from __future__ import annotations
import socket
import sys
from collections.abc import Callable, Generator, Iterator
from contextlib import ExitStack, contextmanager
from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
from typing import Any, cast
import pytest
import sniffio
from _pytest.fixtures import SubRequest
from _pytest.outcomes import Exit
from ._core._eventloop import get_all_backends, get_async_backend
from ._core._exceptions import iterate_exceptions
from .abc import TestRunner
if sys.version_info < (3, 11):
from exceptiongroup import ExceptionGroup
_current_runner: TestRunner | None = None
_runner_stack: ExitStack | None = None
_runner_leases = 0
def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
if isinstance(backend, str):
return backend, {}
elif isinstance(backend, tuple) and len(backend) == 2:
if isinstance(backend[0], str) and isinstance(backend[1], dict):
return cast(tuple[str, dict[str, Any]], backend)
raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
@contextmanager
def get_runner(
backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
global _current_runner, _runner_leases, _runner_stack
if _current_runner is None:
asynclib = get_async_backend(backend_name)
_runner_stack = ExitStack()
if sniffio.current_async_library_cvar.get(None) is None:
# Since we're in control of the event loop, we can cache the name of the
# async library
token = sniffio.current_async_library_cvar.set(backend_name)
_runner_stack.callback(sniffio.current_async_library_cvar.reset, token)
backend_options = backend_options or {}
_current_runner = _runner_stack.enter_context(
asynclib.create_test_runner(backend_options)
)
_runner_leases += 1
try:
yield _current_runner
finally:
_runner_leases -= 1
if not _runner_leases:
assert _runner_stack is not None
_runner_stack.close()
_runner_stack = _current_runner = None
def pytest_configure(config: Any) -> None:
config.addinivalue_line(
"markers",
"anyio: mark the (coroutine function) test to be run asynchronously via anyio.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]:
def wrapper(
*args: Any, anyio_backend: Any, request: SubRequest, **kwargs: Any
) -> Any:
# Rebind any fixture methods to the request instance
if (
request.instance
and ismethod(func)
and type(func.__self__) is type(request.instance)
):
local_func = func.__func__.__get__(request.instance)
else:
local_func = func
backend_name, backend_options = extract_backend_and_options(anyio_backend)
if has_backend_arg:
kwargs["anyio_backend"] = anyio_backend
if has_request_arg:
kwargs["request"] = request
with get_runner(backend_name, backend_options) as runner:
if isasyncgenfunction(local_func):
yield from runner.run_asyncgen_fixture(local_func, kwargs)
else:
yield runner.run_fixture(local_func, kwargs)
# Only apply this to coroutine functions and async generator functions in requests
# that involve the anyio_backend fixture
func = fixturedef.func
if isasyncgenfunction(func) or iscoroutinefunction(func):
if "anyio_backend" in request.fixturenames:
fixturedef.func = wrapper
original_argname = fixturedef.argnames
if not (has_backend_arg := "anyio_backend" in fixturedef.argnames):
fixturedef.argnames += ("anyio_backend",)
if not (has_request_arg := "request" in fixturedef.argnames):
fixturedef.argnames += ("request",)
try:
return (yield)
finally:
fixturedef.func = func
fixturedef.argnames = original_argname
return (yield)
@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
if collector.istestfunction(obj, name):
inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
if iscoroutinefunction(inner_func):
marker = collector.get_closest_marker("anyio")
own_markers = getattr(obj, "pytestmark", ())
if marker or any(marker.name == "anyio" for marker in own_markers):
pytest.mark.usefixtures("anyio_backend")(obj)
@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
def run_with_hypothesis(**kwargs: Any) -> None:
with get_runner(backend_name, backend_options) as runner:
runner.run_test(original_func, kwargs)
backend = pyfuncitem.funcargs.get("anyio_backend")
if backend:
backend_name, backend_options = extract_backend_and_options(backend)
if hasattr(pyfuncitem.obj, "hypothesis"):
# Wrap the inner test function unless it's already wrapped
original_func = pyfuncitem.obj.hypothesis.inner_test
if original_func.__qualname__ != run_with_hypothesis.__qualname__:
if iscoroutinefunction(original_func):
pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis
return None
if iscoroutinefunction(pyfuncitem.obj):
funcargs = pyfuncitem.funcargs
testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
with get_runner(backend_name, backend_options) as runner:
try:
runner.run_test(pyfuncitem.obj, testargs)
except ExceptionGroup as excgrp:
for exc in iterate_exceptions(excgrp):
if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)):
raise exc from excgrp
raise
return True
return None
@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
return request.param
@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
if isinstance(anyio_backend, str):
return anyio_backend
else:
return anyio_backend[0]
@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
if isinstance(anyio_backend, str):
return {}
else:
return anyio_backend[1]
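# Illustrative sketch (not part of the original plugin): a test module using
# this plugin typically applies the "anyio" marker so its coroutine tests run
# once per backend returned by get_all_backends().
#
#     import anyio
#     import pytest
#
#     pytestmark = pytest.mark.anyio
#
#     async def test_sleep() -> None:
#         await anyio.sleep(0)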
class FreePortFactory:
"""
Manages port generation based on specified socket kind, ensuring no duplicate
ports are generated.
This class provides functionality for generating available free ports on the
system. It is initialized with a specific socket kind and can generate ports
for given address families while avoiding reuse of previously generated ports.
Users should not instantiate this class directly, but use the
``free_tcp_port_factory`` and ``free_udp_port_factory`` fixtures instead. For simple
    use cases, ``free_tcp_port`` and ``free_udp_port`` can be used instead.
"""
def __init__(self, kind: socket.SocketKind) -> None:
self._kind = kind
self._generated = set[int]()
@property
def kind(self) -> socket.SocketKind:
"""
The type of socket connection (e.g., :data:`~socket.SOCK_STREAM` or
:data:`~socket.SOCK_DGRAM`) used to bind for checking port availability
"""
return self._kind
def __call__(self, family: socket.AddressFamily | None = None) -> int:
"""
Return an unbound port for the given address family.
:param family: if omitted, both IPv4 and IPv6 addresses will be tried
:return: a port number
"""
if family is not None:
families = [family]
else:
families = [socket.AF_INET]
if socket.has_ipv6:
families.append(socket.AF_INET6)
while True:
port = 0
with ExitStack() as stack:
for family in families:
sock = stack.enter_context(socket.socket(family, self._kind))
addr = "::1" if family == socket.AF_INET6 else "127.0.0.1"
try:
sock.bind((addr, port))
except OSError:
break
if not port:
port = sock.getsockname()[1]
else:
if port not in self._generated:
self._generated.add(port)
return port
@pytest.fixture(scope="session")
def free_tcp_port_factory() -> FreePortFactory:
return FreePortFactory(socket.SOCK_STREAM)
@pytest.fixture(scope="session")
def free_udp_port_factory() -> FreePortFactory:
return FreePortFactory(socket.SOCK_DGRAM)
@pytest.fixture
def free_tcp_port(free_tcp_port_factory: Callable[[], int]) -> int:
return free_tcp_port_factory()
@pytest.fixture
def free_udp_port(free_udp_port_factory: Callable[[], int]) -> int:
return free_udp_port_factory()
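# Illustrative sketch (not part of the original plugin): requesting a free TCP
# port inside a test via the fixture above.
#
#     async def test_listener(free_tcp_port: int) -> None:
#         listener = await anyio.create_tcp_listener(local_port=free_tcp_port)
#         await listener.aclose()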
|
from __future__ import annotations
import atexit
import os
import pickle
import sys
from collections import deque
from collections.abc import Callable
from textwrap import dedent
from typing import Any, Final, TypeVar
from . import current_time, to_thread
from ._core._exceptions import BrokenWorkerIntepreter
from ._core._synchronization import CapacityLimiter
from .lowlevel import RunVar
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
UNBOUND: Final = 2 # I have no clue how this works, but it was used in the stdlib
FMT_UNPICKLED: Final = 0
FMT_PICKLED: Final = 1
DEFAULT_CPU_COUNT: Final = 8 # this is just an arbitrarily selected value
MAX_WORKER_IDLE_TIME = (
30 # seconds a subinterpreter can be idle before becoming eligible for pruning
)
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_idle_workers = RunVar[deque["Worker"]]("_available_workers")
_default_interpreter_limiter = RunVar[CapacityLimiter]("_default_interpreter_limiter")
class Worker:
_run_func = compile(
dedent("""
import _interpqueues as queues
import _interpreters as interpreters
from pickle import loads, dumps, HIGHEST_PROTOCOL
item = queues.get(queue_id)[0]
try:
func, args = loads(item)
retval = func(*args)
except BaseException as exc:
is_exception = True
retval = exc
else:
is_exception = False
try:
queues.put(queue_id, (retval, is_exception), FMT_UNPICKLED, UNBOUND)
except interpreters.NotShareableError:
retval = dumps(retval, HIGHEST_PROTOCOL)
queues.put(queue_id, (retval, is_exception), FMT_PICKLED, UNBOUND)
"""),
"<string>",
"exec",
)
last_used: float = 0
_initialized: bool = False
_interpreter_id: int
_queue_id: int
def initialize(self) -> None:
import _interpqueues as queues
import _interpreters as interpreters
self._interpreter_id = interpreters.create()
self._queue_id = queues.create(2, FMT_UNPICKLED, UNBOUND)
self._initialized = True
interpreters.set___main___attrs(
self._interpreter_id,
{
"queue_id": self._queue_id,
"FMT_PICKLED": FMT_PICKLED,
"FMT_UNPICKLED": FMT_UNPICKLED,
"UNBOUND": UNBOUND,
},
)
def destroy(self) -> None:
import _interpqueues as queues
import _interpreters as interpreters
if self._initialized:
interpreters.destroy(self._interpreter_id)
queues.destroy(self._queue_id)
def _call(
self,
func: Callable[..., T_Retval],
args: tuple[Any],
) -> tuple[Any, bool]:
import _interpqueues as queues
import _interpreters as interpreters
if not self._initialized:
self.initialize()
payload = pickle.dumps((func, args), pickle.HIGHEST_PROTOCOL)
queues.put(self._queue_id, payload, FMT_PICKLED, UNBOUND)
res: Any
is_exception: bool
if exc_info := interpreters.exec(self._interpreter_id, self._run_func):
raise BrokenWorkerIntepreter(exc_info)
(res, is_exception), fmt = queues.get(self._queue_id)[:2]
if fmt == FMT_PICKLED:
res = pickle.loads(res)
return res, is_exception
async def call(
self,
func: Callable[..., T_Retval],
args: tuple[Any],
limiter: CapacityLimiter,
) -> T_Retval:
result, is_exception = await to_thread.run_sync(
self._call,
func,
args,
limiter=limiter,
)
if is_exception:
raise result
return result
def _stop_workers(workers: deque[Worker]) -> None:
for worker in workers:
worker.destroy()
workers.clear()
async def run_sync(
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
limiter: CapacityLimiter | None = None,
) -> T_Retval:
"""
Call the given function with the given arguments in a subinterpreter.
If the ``cancellable`` option is enabled and the task waiting for its completion is
cancelled, the call will still run its course but its return value (or any raised
exception) will be ignored.
.. warning:: This feature is **experimental**. The upstream interpreter API has not
yet been finalized or thoroughly tested, so don't rely on this for anything
mission critical.
:param func: a callable
:param args: positional arguments for the callable
:param limiter: capacity limiter to use to limit the total amount of subinterpreters
running (if omitted, the default limiter is used)
:return: the result of the call
:raises BrokenWorkerIntepreter: if there's an internal error in a subinterpreter
"""
if sys.version_info <= (3, 13):
raise RuntimeError("subinterpreters require at least Python 3.13")
if limiter is None:
limiter = current_default_interpreter_limiter()
try:
idle_workers = _idle_workers.get()
except LookupError:
idle_workers = deque()
_idle_workers.set(idle_workers)
atexit.register(_stop_workers, idle_workers)
async with limiter:
try:
worker = idle_workers.pop()
except IndexError:
worker = Worker()
try:
return await worker.call(func, args, limiter)
finally:
# Prune workers that have been idle for too long
now = current_time()
while idle_workers:
if now - idle_workers[0].last_used <= MAX_WORKER_IDLE_TIME:
break
await to_thread.run_sync(idle_workers.popleft().destroy, limiter=limiter)
worker.last_used = current_time()
idle_workers.append(worker)
def current_default_interpreter_limiter() -> CapacityLimiter:
"""
Return the capacity limiter that is used by default to limit the number of
concurrently running subinterpreters.
Defaults to the number of CPU cores.
:return: a capacity limiter object
"""
try:
return _default_interpreter_limiter.get()
except LookupError:
limiter = CapacityLimiter(os.cpu_count() or DEFAULT_CPU_COUNT)
_default_interpreter_limiter.set(limiter)
return limiter
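# Editor's note: a minimal, hedged usage sketch; not part of the original
# module. ``run_sync`` pickles ``(func, args)``, so the callable must be
# picklable by reference, i.e. defined at module level (no lambdas or
# closures). Requires Python 3.13+.
def _example_fib(n: int) -> int:
    return n if n < 2 else _example_fib(n - 1) + _example_fib(n - 2)
async def _example_subinterpreter_offload() -> None:
    # Runs in a pooled subinterpreter, concurrently with the host interpreter.
    print("fib(25) =", await run_sync(_example_fib, 25))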
|
from __future__ import annotations
import os
import pickle
import subprocess
import sys
from collections import deque
from collections.abc import Callable
from importlib.util import module_from_spec, spec_from_file_location
from typing import TypeVar, cast
from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
from ._core._exceptions import BrokenWorkerProcess
from ._core._subprocesses import open_process
from ._core._synchronization import CapacityLimiter
from ._core._tasks import CancelScope, fail_after
from .abc import ByteReceiveStream, ByteSendStream, Process
from .lowlevel import RunVar, checkpoint_if_cancelled
from .streams.buffered import BufferedByteReceiveStream
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
WORKER_MAX_IDLE_TIME = 300 # 5 minutes
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
"_process_pool_idle_workers"
)
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")
async def run_sync( # type: ignore[return]
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
cancellable: bool = False,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
"""
Call the given function with the given arguments in a worker process.
If the ``cancellable`` option is enabled and the task waiting for its completion is
cancelled, the worker process running it will be abruptly terminated using SIGKILL
    (or ``TerminateProcess()`` on Windows).
:param func: a callable
:param args: positional arguments for the callable
:param cancellable: ``True`` to allow cancellation of the operation while it's
running
    :param limiter: capacity limiter to use to limit the total number of processes
running (if omitted, the default limiter is used)
    :return: the return value of the function
"""
async def send_raw_command(pickled_cmd: bytes) -> object:
try:
await stdin.send(pickled_cmd)
response = await buffered.receive_until(b"\n", 50)
status, length = response.split(b" ")
if status not in (b"RETURN", b"EXCEPTION"):
raise RuntimeError(
f"Worker process returned unexpected response: {response!r}"
)
pickled_response = await buffered.receive_exactly(int(length))
except BaseException as exc:
workers.discard(process)
try:
process.kill()
with CancelScope(shield=True):
await process.aclose()
except ProcessLookupError:
pass
if isinstance(exc, get_cancelled_exc_class()):
raise
else:
raise BrokenWorkerProcess from exc
retval = pickle.loads(pickled_response)
if status == b"EXCEPTION":
assert isinstance(retval, BaseException)
raise retval
else:
return retval
# First pickle the request before trying to reserve a worker process
await checkpoint_if_cancelled()
request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)
# If this is the first run in this event loop thread, set up the necessary variables
try:
workers = _process_pool_workers.get()
idle_workers = _process_pool_idle_workers.get()
except LookupError:
workers = set()
idle_workers = deque()
_process_pool_workers.set(workers)
_process_pool_idle_workers.set(idle_workers)
get_async_backend().setup_process_pool_exit_at_shutdown(workers)
async with limiter or current_default_process_limiter():
# Pop processes from the pool (starting from the most recently used) until we
# find one that hasn't exited yet
process: Process
while idle_workers:
process, idle_since = idle_workers.pop()
if process.returncode is None:
stdin = cast(ByteSendStream, process.stdin)
buffered = BufferedByteReceiveStream(
cast(ByteReceiveStream, process.stdout)
)
# Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
# seconds or longer
now = current_time()
killed_processes: list[Process] = []
while idle_workers:
if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
break
process_to_kill, idle_since = idle_workers.popleft()
process_to_kill.kill()
workers.remove(process_to_kill)
killed_processes.append(process_to_kill)
with CancelScope(shield=True):
for killed_process in killed_processes:
await killed_process.aclose()
break
workers.remove(process)
else:
command = [sys.executable, "-u", "-m", __name__]
process = await open_process(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
try:
stdin = cast(ByteSendStream, process.stdin)
buffered = BufferedByteReceiveStream(
cast(ByteReceiveStream, process.stdout)
)
with fail_after(20):
message = await buffered.receive(6)
if message != b"READY\n":
raise BrokenWorkerProcess(
f"Worker process returned unexpected response: {message!r}"
)
main_module_path = getattr(sys.modules["__main__"], "__file__", None)
pickled = pickle.dumps(
("init", sys.path, main_module_path),
protocol=pickle.HIGHEST_PROTOCOL,
)
await send_raw_command(pickled)
except (BrokenWorkerProcess, get_cancelled_exc_class()):
raise
except BaseException as exc:
process.kill()
raise BrokenWorkerProcess(
"Error during worker process initialization"
) from exc
workers.add(process)
with CancelScope(shield=not cancellable):
try:
return cast(T_Retval, await send_raw_command(request))
finally:
if process in workers:
idle_workers.append((process, current_time()))
def current_default_process_limiter() -> CapacityLimiter:
"""
Return the capacity limiter that is used by default to limit the number of worker
processes.
:return: a capacity limiter object
"""
try:
return _default_process_limiter.get()
except LookupError:
limiter = CapacityLimiter(os.cpu_count() or 2)
_default_process_limiter.set(limiter)
return limiter
def process_worker() -> None:
# Redirect standard streams to os.devnull so that user code won't interfere with the
# parent-worker communication
stdin = sys.stdin
stdout = sys.stdout
sys.stdin = open(os.devnull)
sys.stdout = open(os.devnull, "w")
stdout.buffer.write(b"READY\n")
while True:
retval = exception = None
try:
command, *args = pickle.load(stdin.buffer)
except EOFError:
return
except BaseException as exc:
exception = exc
else:
if command == "run":
func, args = args
try:
retval = func(*args)
except BaseException as exc:
exception = exc
elif command == "init":
main_module_path: str | None
sys.path, main_module_path = args
del sys.modules["__main__"]
if main_module_path and os.path.isfile(main_module_path):
# Load the parent's main module but as __mp_main__ instead of
# __main__ (like multiprocessing does) to avoid infinite recursion
try:
spec = spec_from_file_location("__mp_main__", main_module_path)
if spec and spec.loader:
main = module_from_spec(spec)
spec.loader.exec_module(main)
sys.modules["__main__"] = main
except BaseException as exc:
exception = exc
try:
if exception is not None:
status = b"EXCEPTION"
pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
else:
status = b"RETURN"
pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
except BaseException as exc:
exception = exc
status = b"EXCEPTION"
pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)
stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
stdout.buffer.write(pickled)
# Respect SIGTERM
if isinstance(exception, SystemExit):
raise exception
if __name__ == "__main__":
process_worker()
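# Editor's note: a hedged usage sketch; not part of the original module.
# As with the subinterpreter variant, ``(func, args)`` is pickled, so the
# callable must be importable at module level; ``os.getpid`` qualifies and
# makes it obvious that the call ran in a different process.
async def _example_process_offload() -> None:
    worker_pid = await run_sync(os.getpid)
    print(f"parent pid={os.getpid()}, worker pid={worker_pid}")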
|
from __future__ import annotations
import sys
from collections.abc import Callable
from typing import TypeVar
from warnings import warn
from ._core._eventloop import get_async_backend
from .abc import CapacityLimiter
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
async def run_sync(
func: Callable[[Unpack[PosArgsT]], T_Retval],
*args: Unpack[PosArgsT],
abandon_on_cancel: bool = False,
cancellable: bool | None = None,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
"""
Call the given function with the given arguments in a worker thread.
If the ``cancellable`` option is enabled and the task waiting for its completion is
cancelled, the thread will still run its course but its return value (or any raised
exception) will be ignored.
:param func: a callable
:param args: positional arguments for the callable
:param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
        unchecked on its own) if the host task is cancelled, ``False`` to ignore
cancellations in the host task until the operation has completed in the worker
thread
:param cancellable: deprecated alias of ``abandon_on_cancel``; will override
``abandon_on_cancel`` if both parameters are passed
    :param limiter: capacity limiter to use to limit the total number of threads running
(if omitted, the default limiter is used)
    :return: the return value of the function
"""
if cancellable is not None:
abandon_on_cancel = cancellable
warn(
"The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
"deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
DeprecationWarning,
stacklevel=2,
)
return await get_async_backend().run_sync_in_worker_thread(
func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
)
def current_default_thread_limiter() -> CapacityLimiter:
"""
Return the capacity limiter that is used by default to limit the number of
concurrent threads.
:return: a capacity limiter object
"""
return get_async_backend().current_default_thread_limiter()
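# Editor's note: a hedged usage sketch; not part of the original module.
# Unlike the process/interpreter variants, nothing is pickled here, so
# closures and bound methods work; AnyIO's default thread limiter allows
# 40 concurrent threads unless a custom limiter is passed.
async def _example_thread_offload() -> None:
    import time
    # time.sleep blocks its worker thread, not the event loop.
    await run_sync(time.sleep, 0.1)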
|
from __future__ import annotations
from ._core._eventloop import current_time as current_time
from ._core._eventloop import get_all_backends as get_all_backends
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
from ._core._eventloop import run as run
from ._core._eventloop import sleep as sleep
from ._core._eventloop import sleep_forever as sleep_forever
from ._core._eventloop import sleep_until as sleep_until
from ._core._exceptions import BrokenResourceError as BrokenResourceError
from ._core._exceptions import BrokenWorkerIntepreter as BrokenWorkerIntepreter
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
from ._core._exceptions import BusyResourceError as BusyResourceError
from ._core._exceptions import ClosedResourceError as ClosedResourceError
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
from ._core._exceptions import EndOfStream as EndOfStream
from ._core._exceptions import IncompleteRead as IncompleteRead
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
from ._core._exceptions import WouldBlock as WouldBlock
from ._core._fileio import AsyncFile as AsyncFile
from ._core._fileio import Path as Path
from ._core._fileio import open_file as open_file
from ._core._fileio import wrap_file as wrap_file
from ._core._resources import aclose_forcefully as aclose_forcefully
from ._core._signals import open_signal_receiver as open_signal_receiver
from ._core._sockets import connect_tcp as connect_tcp
from ._core._sockets import connect_unix as connect_unix
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
from ._core._sockets import (
create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
)
from ._core._sockets import create_tcp_listener as create_tcp_listener
from ._core._sockets import create_udp_socket as create_udp_socket
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
from ._core._sockets import create_unix_listener as create_unix_listener
from ._core._sockets import getaddrinfo as getaddrinfo
from ._core._sockets import getnameinfo as getnameinfo
from ._core._sockets import wait_readable as wait_readable
from ._core._sockets import wait_socket_readable as wait_socket_readable
from ._core._sockets import wait_socket_writable as wait_socket_writable
from ._core._sockets import wait_writable as wait_writable
from ._core._streams import create_memory_object_stream as create_memory_object_stream
from ._core._subprocesses import open_process as open_process
from ._core._subprocesses import run_process as run_process
from ._core._synchronization import CapacityLimiter as CapacityLimiter
from ._core._synchronization import (
CapacityLimiterStatistics as CapacityLimiterStatistics,
)
from ._core._synchronization import Condition as Condition
from ._core._synchronization import ConditionStatistics as ConditionStatistics
from ._core._synchronization import Event as Event
from ._core._synchronization import EventStatistics as EventStatistics
from ._core._synchronization import Lock as Lock
from ._core._synchronization import LockStatistics as LockStatistics
from ._core._synchronization import ResourceGuard as ResourceGuard
from ._core._synchronization import Semaphore as Semaphore
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
from ._core._tasks import CancelScope as CancelScope
from ._core._tasks import create_task_group as create_task_group
from ._core._tasks import current_effective_deadline as current_effective_deadline
from ._core._tasks import fail_after as fail_after
from ._core._tasks import move_on_after as move_on_after
from ._core._tempfile import NamedTemporaryFile as NamedTemporaryFile
from ._core._tempfile import SpooledTemporaryFile as SpooledTemporaryFile
from ._core._tempfile import TemporaryDirectory as TemporaryDirectory
from ._core._tempfile import TemporaryFile as TemporaryFile
from ._core._tempfile import gettempdir as gettempdir
from ._core._tempfile import gettempdirb as gettempdirb
from ._core._tempfile import mkdtemp as mkdtemp
from ._core._tempfile import mkstemp as mkstemp
from ._core._testing import TaskInfo as TaskInfo
from ._core._testing import get_current_task as get_current_task
from ._core._testing import get_running_tasks as get_running_tasks
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
from ._core._typedattr import typed_attribute as typed_attribute
# Re-export imports so they look like they live directly in this package
for __value in list(locals().values()):
if getattr(__value, "__module__", "").startswith("anyio."):
__value.__module__ = __name__
del __value
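# Editor's note (hedged): after the loop above, each re-exported object
# reports this package as its home, e.g. ``CapacityLimiter.__module__`` is
# ``"anyio"`` rather than ``"anyio._core._synchronization"``, which keeps
# documentation and introspection pointing at the public import location.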
|
from __future__ import annotations
import math
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
from contextlib import AbstractContextManager
from os import PathLike
from signal import Signals
from socket import AddressFamily, SocketKind, socket
from typing import (
IO,
TYPE_CHECKING,
Any,
TypeVar,
Union,
overload,
)
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
if TYPE_CHECKING:
from _typeshed import HasFileno
from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
from .._core._tasks import CancelScope
from .._core._testing import TaskInfo
from ..from_thread import BlockingPortal
from ._sockets import (
ConnectedUDPSocket,
ConnectedUNIXDatagramSocket,
IPSockAddrType,
SocketListener,
SocketStream,
UDPSocket,
UNIXDatagramSocket,
UNIXSocketStream,
)
from ._subprocesses import Process
from ._tasks import TaskGroup
from ._testing import TestRunner
T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
class AsyncBackend(metaclass=ABCMeta):
@classmethod
@abstractmethod
def run(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
kwargs: dict[str, Any],
options: dict[str, Any],
) -> T_Retval:
"""
Run the given coroutine function in an asynchronous event loop.
The current thread must not be already running an event loop.
:param func: a coroutine function
:param args: positional arguments to ``func``
        :param kwargs: keyword arguments to ``func``
:param options: keyword arguments to call the backend ``run()`` implementation
with
:return: the return value of the coroutine function
"""
@classmethod
@abstractmethod
def current_token(cls) -> object:
"""
        :return: a backend-specific token object identifying the running event loop
"""
@classmethod
@abstractmethod
def current_time(cls) -> float:
"""
Return the current value of the event loop's internal clock.
:return: the clock value (seconds)
"""
@classmethod
@abstractmethod
def cancelled_exception_class(cls) -> type[BaseException]:
"""Return the exception class that is raised in a task if it's cancelled."""
@classmethod
@abstractmethod
async def checkpoint(cls) -> None:
"""
Check if the task has been cancelled, and allow rescheduling of other tasks.
This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
:meth:`cancel_shielded_checkpoint`.
"""
@classmethod
async def checkpoint_if_cancelled(cls) -> None:
"""
Check if the current task group has been cancelled.
This will check if the task has been cancelled, but will not allow other tasks
to be scheduled if not.
"""
if cls.current_effective_deadline() == -math.inf:
await cls.checkpoint()
@classmethod
async def cancel_shielded_checkpoint(cls) -> None:
"""
Allow the rescheduling of other tasks.
This will give other tasks the opportunity to run, but without checking if the
current task group has been cancelled, unlike with :meth:`checkpoint`.
"""
with cls.create_cancel_scope(shield=True):
await cls.sleep(0)
@classmethod
@abstractmethod
async def sleep(cls, delay: float) -> None:
"""
Pause the current task for the specified duration.
:param delay: the duration, in seconds
"""
@classmethod
@abstractmethod
def create_cancel_scope(
cls, *, deadline: float = math.inf, shield: bool = False
) -> CancelScope:
pass
@classmethod
@abstractmethod
def current_effective_deadline(cls) -> float:
"""
Return the nearest deadline among all the cancel scopes effective for the
current task.
:return:
- a clock value from the event loop's internal clock
- ``inf`` if there is no deadline in effect
- ``-inf`` if the current scope has been cancelled
:rtype: float
"""
@classmethod
@abstractmethod
def create_task_group(cls) -> TaskGroup:
pass
@classmethod
@abstractmethod
def create_event(cls) -> Event:
pass
@classmethod
@abstractmethod
def create_lock(cls, *, fast_acquire: bool) -> Lock:
pass
@classmethod
@abstractmethod
def create_semaphore(
cls,
initial_value: int,
*,
max_value: int | None = None,
fast_acquire: bool = False,
) -> Semaphore:
pass
@classmethod
@abstractmethod
def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
pass
@classmethod
@abstractmethod
async def run_sync_in_worker_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
abandon_on_cancel: bool = False,
limiter: CapacityLimiter | None = None,
) -> T_Retval:
pass
@classmethod
@abstractmethod
def check_cancelled(cls) -> None:
pass
@classmethod
@abstractmethod
def run_async_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
pass
@classmethod
@abstractmethod
def run_sync_from_thread(
cls,
func: Callable[[Unpack[PosArgsT]], T_Retval],
args: tuple[Unpack[PosArgsT]],
token: object,
) -> T_Retval:
pass
@classmethod
@abstractmethod
def create_blocking_portal(cls) -> BlockingPortal:
pass
@classmethod
@abstractmethod
async def open_process(
cls,
command: StrOrBytesPath | Sequence[StrOrBytesPath],
*,
stdin: int | IO[Any] | None,
stdout: int | IO[Any] | None,
stderr: int | IO[Any] | None,
**kwargs: Any,
) -> Process:
pass
@classmethod
@abstractmethod
def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
pass
@classmethod
@abstractmethod
async def connect_tcp(
cls, host: str, port: int, local_address: IPSockAddrType | None = None
) -> SocketStream:
pass
@classmethod
@abstractmethod
async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
pass
@classmethod
@abstractmethod
def create_tcp_listener(cls, sock: socket) -> SocketListener:
pass
@classmethod
@abstractmethod
def create_unix_listener(cls, sock: socket) -> SocketListener:
pass
@classmethod
@abstractmethod
async def create_udp_socket(
cls,
family: AddressFamily,
local_address: IPSockAddrType | None,
remote_address: IPSockAddrType | None,
reuse_port: bool,
) -> UDPSocket | ConnectedUDPSocket:
pass
@classmethod
@overload
async def create_unix_datagram_socket(
cls, raw_socket: socket, remote_path: None
) -> UNIXDatagramSocket: ...
@classmethod
@overload
async def create_unix_datagram_socket(
cls, raw_socket: socket, remote_path: str | bytes
) -> ConnectedUNIXDatagramSocket: ...
@classmethod
@abstractmethod
async def create_unix_datagram_socket(
cls, raw_socket: socket, remote_path: str | bytes | None
) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
pass
@classmethod
@abstractmethod
async def getaddrinfo(
cls,
host: bytes | str | None,
port: str | int | None,
*,
family: int | AddressFamily = 0,
type: int | SocketKind = 0,
proto: int = 0,
flags: int = 0,
) -> Sequence[
tuple[
AddressFamily,
SocketKind,
int,
str,
tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
]
]:
pass
@classmethod
@abstractmethod
async def getnameinfo(
cls, sockaddr: IPSockAddrType, flags: int = 0
) -> tuple[str, str]:
pass
@classmethod
@abstractmethod
async def wait_readable(cls, obj: HasFileno | int) -> None:
pass
@classmethod
@abstractmethod
async def wait_writable(cls, obj: HasFileno | int) -> None:
pass
@classmethod
@abstractmethod
def current_default_thread_limiter(cls) -> CapacityLimiter:
pass
@classmethod
@abstractmethod
def open_signal_receiver(
cls, *signals: Signals
) -> AbstractContextManager[AsyncIterator[Signals]]:
pass
@classmethod
@abstractmethod
def get_current_task(cls) -> TaskInfo:
pass
@classmethod
@abstractmethod
def get_running_tasks(cls) -> Sequence[TaskInfo]:
pass
@classmethod
@abstractmethod
async def wait_all_tasks_blocked(cls) -> None:
pass
@classmethod
@abstractmethod
def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
pass
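# Editor's note: a hedged sketch; not part of the original module. User code
# never subclasses AsyncBackend directly; it reaches these hooks through
# ``anyio.lowlevel``. ``process_item`` below is a hypothetical stand-in.
#
#     from anyio.lowlevel import checkpoint
#
#     async def drain(items):
#         for item in items:
#             process_item(item)   # hypothetical CPU-bound step
#             await checkpoint()   # cooperative yield + cancellation check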
|
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from types import TracebackType
from typing import TypeVar
T = TypeVar("T")
class AsyncResource(metaclass=ABCMeta):
"""
Abstract base class for all closeable asynchronous resources.
Works as an asynchronous context manager which returns the instance itself on enter,
and calls :meth:`aclose` on exit.
"""
__slots__ = ()
async def __aenter__(self: T) -> T:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.aclose()
@abstractmethod
async def aclose(self) -> None:
"""Close the resource."""
|
from __future__ import annotations
import socket
from abc import abstractmethod
from collections.abc import Callable, Collection, Mapping
from contextlib import AsyncExitStack
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from types import TracebackType
from typing import Any, TypeVar, Union
from .._core._typedattr import (
TypedAttributeProvider,
TypedAttributeSet,
typed_attribute,
)
from ._streams import ByteStream, Listener, UnreliableObjectStream
from ._tasks import TaskGroup
IPAddressType = Union[str, IPv4Address, IPv6Address]
IPSockAddrType = tuple[str, int]
SockAddrType = Union[IPSockAddrType, str]
UDPPacketType = tuple[bytes, IPSockAddrType]
UNIXDatagramPacketType = tuple[bytes, str]
T_Retval = TypeVar("T_Retval")
class _NullAsyncContextManager:
async def __aenter__(self) -> None:
pass
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
return None
class SocketAttribute(TypedAttributeSet):
#: the address family of the underlying socket
family: AddressFamily = typed_attribute()
#: the local socket address of the underlying socket
local_address: SockAddrType = typed_attribute()
#: for IP addresses, the local port the underlying socket is bound to
local_port: int = typed_attribute()
#: the underlying stdlib socket object
raw_socket: socket.socket = typed_attribute()
#: the remote address the underlying socket is connected to
remote_address: SockAddrType = typed_attribute()
#: for IP addresses, the remote port the underlying socket is connected to
remote_port: int = typed_attribute()
class _SocketProvider(TypedAttributeProvider):
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
from .._core._sockets import convert_ipv6_sockaddr as convert
attributes: dict[Any, Callable[[], Any]] = {
SocketAttribute.family: lambda: self._raw_socket.family,
SocketAttribute.local_address: lambda: convert(
self._raw_socket.getsockname()
),
SocketAttribute.raw_socket: lambda: self._raw_socket,
}
try:
peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
except OSError:
peername = None
# Provide the remote address for connected sockets
if peername is not None:
attributes[SocketAttribute.remote_address] = lambda: peername
# Provide local and remote ports for IP based sockets
if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
attributes[SocketAttribute.local_port] = (
lambda: self._raw_socket.getsockname()[1]
)
if peername is not None:
remote_port = peername[1]
attributes[SocketAttribute.remote_port] = lambda: remote_port
return attributes
@property
@abstractmethod
def _raw_socket(self) -> socket.socket:
pass
class SocketStream(ByteStream, _SocketProvider):
"""
Transports bytes over a socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
class UNIXSocketStream(SocketStream):
@abstractmethod
async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
"""
Send file descriptors along with a message to the peer.
:param message: a non-empty bytestring
:param fds: a collection of files (either numeric file descriptors or open file
or socket objects)
"""
@abstractmethod
async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
"""
Receive file descriptors along with a message from the peer.
:param msglen: length of the message to expect from the peer
:param maxfds: maximum number of file descriptors to expect from the peer
:return: a tuple of (message, file descriptors)
"""
class SocketListener(Listener[SocketStream], _SocketProvider):
"""
Listens to incoming socket connections.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
@abstractmethod
async def accept(self) -> SocketStream:
"""Accept an incoming connection."""
async def serve(
self,
handler: Callable[[SocketStream], Any],
task_group: TaskGroup | None = None,
) -> None:
from .. import create_task_group
async with AsyncExitStack() as stack:
if task_group is None:
task_group = await stack.enter_async_context(create_task_group())
while True:
stream = await self.accept()
task_group.start_soon(handler, stream)
class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
"""
Represents an unconnected UDP socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
async def sendto(self, data: bytes, host: str, port: int) -> None:
"""
Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).
"""
return await self.send((data, (host, port)))
class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
"""
    Represents a connected UDP socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
class UNIXDatagramSocket(
UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
):
"""
Represents an unconnected Unix datagram socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
async def sendto(self, data: bytes, path: str) -> None:
"""Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
return await self.send((data, path))
class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
"""
Represents a connected Unix datagram socket.
Supports all relevant extra attributes from :class:`~SocketAttribute`.
"""
|
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Callable
from typing import Any, Generic, TypeVar, Union
from .._core._exceptions import EndOfStream
from .._core._typedattr import TypedAttributeProvider
from ._resources import AsyncResource
from ._tasks import TaskGroup
T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)
class UnreliableObjectReceiveStream(
Generic[T_co], AsyncResource, TypedAttributeProvider
):
"""
An interface for receiving objects.
This interface makes no guarantees that the received messages arrive in the order in
which they were sent, or that no messages are missed.
Asynchronously iterating over objects of this type will yield objects matching the
given type parameter.
"""
def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
return self
async def __anext__(self) -> T_co:
try:
return await self.receive()
except EndOfStream:
raise StopAsyncIteration
@abstractmethod
async def receive(self) -> T_co:
"""
Receive the next item.
:raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
closed
:raises ~anyio.EndOfStream: if this stream has been closed from the other end
:raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
due to external causes
"""
class UnreliableObjectSendStream(
Generic[T_contra], AsyncResource, TypedAttributeProvider
):
"""
An interface for sending objects.
This interface makes no guarantees that the messages sent will reach the
recipient(s) in the same order in which they were sent, or at all.
"""
@abstractmethod
async def send(self, item: T_contra) -> None:
"""
Send an item to the peer(s).
:param item: the item to send
:raises ~anyio.ClosedResourceError: if the send stream has been explicitly
closed
:raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
due to external causes
"""
class UnreliableObjectStream(
UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
):
"""
A bidirectional message stream which does not guarantee the order or reliability of
message delivery.
"""
class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
"""
A receive message stream which guarantees that messages are received in the same
order in which they were sent, and that no messages are missed.
"""
class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
"""
A send message stream which guarantees that messages are delivered in the same order
in which they were sent, without missing any messages in the middle.
"""
class ObjectStream(
ObjectReceiveStream[T_Item],
ObjectSendStream[T_Item],
UnreliableObjectStream[T_Item],
):
"""
A bidirectional message stream which guarantees the order and reliability of message
delivery.
"""
@abstractmethod
async def send_eof(self) -> None:
"""
Send an end-of-file indication to the peer.
You should not try to send any further data to this stream after calling this
method. This method is idempotent (does nothing on successive calls).
"""
class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
"""
An interface for receiving bytes from a single peer.
Iterating this byte stream will yield a byte string of arbitrary length, but no more
than 65536 bytes.
"""
def __aiter__(self) -> ByteReceiveStream:
return self
async def __anext__(self) -> bytes:
try:
return await self.receive()
except EndOfStream:
raise StopAsyncIteration
@abstractmethod
async def receive(self, max_bytes: int = 65536) -> bytes:
"""
Receive at most ``max_bytes`` bytes from the peer.
.. note:: Implementers of this interface should not return an empty
:class:`bytes` object, and users should ignore them.
:param max_bytes: maximum number of bytes to receive
:return: the received bytes
:raises ~anyio.EndOfStream: if this stream has been closed from the other end
"""
class ByteSendStream(AsyncResource, TypedAttributeProvider):
"""An interface for sending bytes to a single peer."""
@abstractmethod
async def send(self, item: bytes) -> None:
"""
Send the given bytes to the peer.
:param item: the bytes to send
"""
class ByteStream(ByteReceiveStream, ByteSendStream):
"""A bidirectional byte stream."""
@abstractmethod
async def send_eof(self) -> None:
"""
Send an end-of-file indication to the peer.
You should not try to send any further data to this stream after calling this
method. This method is idempotent (does nothing on successive calls).
"""
#: Type alias for all unreliable bytes-oriented receive streams.
AnyUnreliableByteReceiveStream = Union[
UnreliableObjectReceiveStream[bytes], ByteReceiveStream
]
#: Type alias for all unreliable bytes-oriented send streams.
AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
#: Type alias for all unreliable bytes-oriented streams.
AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
#: Type alias for all bytes-oriented receive streams.
AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
#: Type alias for all bytes-oriented send streams.
AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
#: Type alias for all bytes-oriented streams.
AnyByteStream = Union[ObjectStream[bytes], ByteStream]
class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
"""An interface for objects that let you accept incoming connections."""
@abstractmethod
async def serve(
self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
) -> None:
"""
Accept incoming connections as they come in and start tasks to handle them.
:param handler: a callable that will be used to handle each accepted connection
:param task_group: the task group that will be used to start tasks for handling
each accepted connection (if omitted, an ad-hoc task group will be created)
"""
|
from __future__ import annotations
from abc import abstractmethod
from signal import Signals
from ._resources import AsyncResource
from ._streams import ByteReceiveStream, ByteSendStream
class Process(AsyncResource):
"""An asynchronous version of :class:`subprocess.Popen`."""
@abstractmethod
async def wait(self) -> int:
"""
Wait until the process exits.
:return: the exit code of the process
"""
@abstractmethod
def terminate(self) -> None:
"""
Terminates the process, gracefully if possible.
On Windows, this calls ``TerminateProcess()``.
On POSIX systems, this sends ``SIGTERM`` to the process.
.. seealso:: :meth:`subprocess.Popen.terminate`
"""
@abstractmethod
def kill(self) -> None:
"""
Kills the process.
On Windows, this calls ``TerminateProcess()``.
On POSIX systems, this sends ``SIGKILL`` to the process.
.. seealso:: :meth:`subprocess.Popen.kill`
"""
@abstractmethod
def send_signal(self, signal: Signals) -> None:
"""
Send a signal to the subprocess.
.. seealso:: :meth:`subprocess.Popen.send_signal`
:param signal: the signal number (e.g. :data:`signal.SIGHUP`)
"""
@property
@abstractmethod
def pid(self) -> int:
"""The process ID of the process."""
@property
@abstractmethod
def returncode(self) -> int | None:
"""
The return code of the process. If the process has not yet terminated, this will
be ``None``.
"""
@property
@abstractmethod
def stdin(self) -> ByteSendStream | None:
"""The stream for the standard input of the process."""
@property
@abstractmethod
def stdout(self) -> ByteReceiveStream | None:
"""The stream for the standard output of the process."""
@property
@abstractmethod
def stderr(self) -> ByteReceiveStream | None:
"""The stream for the standard error output of the process."""
|
from __future__ import annotations
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import Awaitable, Callable
from types import TracebackType
from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload
if sys.version_info >= (3, 11):
from typing import TypeVarTuple, Unpack
else:
from typing_extensions import TypeVarTuple, Unpack
if TYPE_CHECKING:
from .._core._tasks import CancelScope
T_Retval = TypeVar("T_Retval")
T_contra = TypeVar("T_contra", contravariant=True)
PosArgsT = TypeVarTuple("PosArgsT")
class TaskStatus(Protocol[T_contra]):
@overload
def started(self: TaskStatus[None]) -> None: ...
@overload
def started(self, value: T_contra) -> None: ...
def started(self, value: T_contra | None = None) -> None:
"""
Signal that the task has started.
:param value: object passed back to the starter of the task
"""
class TaskGroup(metaclass=ABCMeta):
"""
Groups several asynchronous tasks together.
:ivar cancel_scope: the cancel scope inherited by all child tasks
:vartype cancel_scope: CancelScope
.. note:: On asyncio, support for eager task factories is considered to be
**experimental**. In particular, they don't follow the usual semantics of new
tasks being scheduled on the next iteration of the event loop, and may thus
cause unexpected behavior in code that wasn't written with such semantics in
mind.
"""
cancel_scope: CancelScope
@abstractmethod
def start_soon(
self,
func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
*args: Unpack[PosArgsT],
name: object = None,
) -> None:
"""
Start a new task in this task group.
:param func: a coroutine function
:param args: positional arguments to call the function with
:param name: name of the task, for the purposes of introspection and debugging
.. versionadded:: 3.0
"""
@abstractmethod
async def start(
self,
func: Callable[..., Awaitable[Any]],
*args: object,
name: object = None,
) -> Any:
"""
Start a new task and wait until it signals for readiness.
:param func: a coroutine function
:param args: positional arguments to call the function with
:param name: name of the task, for the purposes of introspection and debugging
:return: the value passed to ``task_status.started()``
:raises RuntimeError: if the task finishes without calling
``task_status.started()``
.. versionadded:: 3.0
"""
@abstractmethod
async def __aenter__(self) -> TaskGroup:
"""Enter the task group context and allow starting new tasks."""
@abstractmethod
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool | None:
"""Exit the task group context waiting for all tasks to finish."""
|